#!/usr/bin/env python
import sys
-import urllib
+if sys.version_info[0] < 3:
+ from HTMLParser import HTMLParser
+ from urllib import urlopen, urlretrieve, urlencode
+ def get_encoding(info):
+ return info.getparam('charset') or 'ascii'
+
+else:
+ from html.parser import HTMLParser
+ from urllib.request import urlopen, urlretrieve
+ from urllib.parse import urlencode
+ def get_encoding(info):
+ return info.get_content_charset('ascii')
import zipfile
import subprocess
-import HTMLParser
+
+
+def output(fo, fmt, *args, **kargs):
+ if not args:
+ args = kargs
+ fo.write((fmt % args) + '\n')
+
+def echo(fmt, *args, **kargs):
+ output(sys.stdout, fmt, *args, **kargs)
+
+def error(fmt, *args, **kargs):
+ output(sys.stderr, fmt, *args, **kargs)
+
class SubDivXQuery:
def __init__(self, to_search, page_number):
@property
def url(self):
return 'http://%s%s?%s' % (self.host, self.page,
- urllib.urlencode(self.query))
+ urlencode(self.query))
@property
def page_uri(self):
- return self.page + '?' + urllib.urlencode(self.query)
+ return self.page + '?' + urlencode(self.query)
@property
def down_uri(self):
return 'http://' + self.host + self.down_page
-class SubDivXHTMLParser(HTMLParser.HTMLParser):
+class SubDivXHTMLParser(HTMLParser):
IDLE = 1
HEADER = 2
def __init__(self, down_uri):
- HTMLParser.HTMLParser.__init__(self)
+ HTMLParser.__init__(self)
self.down_uri = down_uri
self.depth = 0
self.parsing = False
subs = []
while True:
query = SubDivXQuery(query_str, page_number)
- url = urllib.urlopen(query.url)
+ url = urlopen(query.url)
parser = SubDivXHTMLParser(query.down_uri)
+ try:
+ encoding = get_encoding(url.info())
+ except Exception:
+ encoding = 'ascii'
+
for line in url:
- parser.feed(line)
+ parser.feed(line.decode(encoding))
url.close()
subs.extend(parser.subs)
page_number += 1
- return sorted(subs, key=lambda s: int(s['downloads']), reverse=True)
+ return subs
+
+
+def unzip_subs(fname):
+ sub_exts = ('.srt', '.sub')
+ z = zipfile.ZipFile(fname, 'r')
+ z.printdir()
+ for fn in z.namelist():
+ if fn.endswith(sub_exts):
+ if '..' in fn or fn.startswith('/'):
+ error('Ignoring file with dangerous name: %s',
+ fn)
+ continue
+ echo('Extracting %s...', fn)
+ z.extract(fn)
def get_subs(query_str, filters):
+ global opts
zip_exts = ('application/zip',)
rar_exts = ('application/rar', 'application/x-rar-compressed')
subs = subdivx_get_subs(query_str)
subs = filter_subtitles(subs, filters)
+ subs.sort(key=lambda s: int(s['downloads']), reverse=True)
for sub in subs:
- print '''\
+ echo('''\
- %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
%(desc)s
DOWNLOADING ...
-''' % sub
- fname, headers = urllib.urlretrieve(sub['url'])
+''', **sub)
+ if opts.list_only:
+ continue
+ fname, headers = urlretrieve(sub['url'])
if 'Content-Type' in headers:
if headers['Content-Type'] in zip_exts:
- z = zipfile.ZipFile(fname, 'r')
- z.printdir()
- for fn in z.namelist():
- if fn.endswith('.srt') or fn.endswith('.sub'):
- if '..' in fn or fn.startswith('/'):
- print 'Dangerous file name:', fn
- continue
- print 'Extracting', fn, '...'
- z.extract(fn)
+ unzip_subs(fname)
elif headers['Content-Type'] in rar_exts:
if subprocess.call(['rar', 'x', fname]) != 0:
- print 'Error unraring file %s' % fname
+ error('Error unraring file %s', fname)
else:
- print 'Unrecognized file type:', headers['Content-Type']
+ error('Unrecognized file type: %s',
+ headers['Content-Type'])
else:
- print 'No Content-Type!'
+ error('No Content-Type!')
+
+
+def parse_args(argv):
+ from optparse import OptionParser
+ parser = OptionParser(usage="%prog [OPTIONS] QUERY [FILTER ...]",
+ description="""
+Download subtitles from subdivx.com searching the string QUERY. If FILTERs are
+specified, only subtitles that matches all those filters are downloaded.
+Filters have the format "X:filter", where X is a field specification: t=titulo,
+d=desc, a=autor, f=formato, c=comentarios, C=cds, F=fecha and D=downloads.
+filter is a string that should be found in that field (case insensitive). If
+the field specifier is not known (or there isn't one) the filter string is
+searched for in all the fields.
+ """.strip())
+ parser.add_option("-l", "--list-only",
+ default=False, action='store_true',
+ help="Don't download the subtitles, just list them")
+
+ (opts, args) = parser.parse_args()
+ if not args:
+ parser.error("Missing query string")
+
+ return (args[0], args[1:], opts)
+
+(query_str, filters, opts) = parse_args(sys.argv)
+get_subs(query_str, filters)
-get_subs(sys.argv[1], sys.argv[2:])