Use lowercase for help messages to match --help
diff --git a/subdivxget b/subdivxget
index 5c5cc7a49cfab50003860880a380ff6737f67fdd..59971140ba6957dd771a575455f6b51d50bc3d00 100755
@@ -1,18 +1,45 @@
 #!/usr/bin/env python
 
 import sys
-import urllib
+if sys.version_info[0] < 3:
+       from HTMLParser import HTMLParser
+       from urllib import urlopen, urlretrieve, urlencode
+       def get_encoding(info):
+               return info.getparam('charset')
+
+else:
+       from html.parser import HTMLParser
+       from urllib.request import urlopen, urlretrieve
+       from urllib.parse import urlencode
+       def get_encoding(info):
+               return info.get_content_charset('ascii')
 import zipfile
 import subprocess
-import HTMLParser
+
+
+def output(fo, fmt, *args, **kargs):
+       if not args:
+               args = kargs
+       fo.write((fmt % args) + '\n')
+
+def echo(fmt, *args, **kargs):
+       global opts
+       if opts.quiet:
+               return
+       output(sys.stdout, fmt, *args, **kargs)
+
+def error(fmt, *args, **kargs):
+       output(sys.stderr, fmt, *args, **kargs)
+
 
 class SubDivXQuery:
-       def __init__(self, to_search):
+       def __init__(self, to_search, page_number):
                self.host = "www.subdivx.com"
                self.page = "/index.php"
                self.down_page = "/bajar.php"
                self.query = dict(
                        buscar = to_search,
+                       pg = page_number,
                        accion = 5,
                        masdesc = '',
                        subtitulos = 1,
@@ -21,117 +48,255 @@ class SubDivXQuery:
        @property
        def url(self):
                return 'http://%s%s?%s' % (self.host, self.page,
-                               urllib.urlencode(self.query))
+                               urlencode(self.query))
        @property
        def page_uri(self):
-               return self.page + '?' + urllib.urlencode(self.query)
+               return self.page + '?' + urlencode(self.query)
        @property
        def down_uri(self):
                return 'http://' + self.host + self.down_page
 
-class Subtitle:
-       pass
 
-class SubDivXHTMLParser(HTMLParser.HTMLParser):
+class SubDivXHTMLParser(HTMLParser):
 
        IDLE = 1
        HEADER = 2
 
        def __init__(self, down_uri):
-               HTMLParser.HTMLParser.__init__(self)
+               HTMLParser.__init__(self)
                self.down_uri = down_uri
                self.depth = 0
                self.parsing = False
                self.subs = []
                self.attr = None
+               self.attr_depth = 0
+               self.cur = None
+               self.in_script_style = False
 
        def handle_starttag(self, tag, attrs):
                attrs = dict(attrs)
                if tag == 'div' and attrs.get('id') == 'menu_detalle_buscador':
-                       #self.cur = Subtitle()
                        self.cur = dict()
                        self.subs.append(self.cur)
                        self.parsing = True
                if not self.parsing:
                        return
+               if tag == 'script' or tag == 'style':
+                       self.in_script_style = True
+                       return
                if tag == 'div':
                        if attrs.get('id') == 'buscador_detalle':
                                self.parsing = True
                        elif attrs.get('id') == 'buscador_detalle_sub':
                                self.attr = 'desc'
+                               self.attr_depth = self.depth + 1
+                               self.cur[self.attr] = ''
                elif tag == 'a':
                        if attrs.get('class') == 'titulo_menu_izq':
-                               self.attr = 'title'
+                               self.attr = 'titulo'
+                               self.attr_depth = self.depth + 1
+                               self.cur[self.attr] = ''
                        elif attrs.get('href', '').startswith(self.down_uri):
                                self.cur['url'] = attrs['href']
-               if self.parsing:
+               # br are usually not closed, so ignore them in depth calculation
+               if self.parsing and tag != 'br':
                        self.depth += 1
 
        def handle_endtag(self, tag):
                if self.parsing:
-                       self.depth -= 1
+                       if tag == 'script' or tag == 'style':
+                               self.in_script_style = False
+                               return
+                       if self.depth == self.attr_depth:
+                               self.attr = None
+                               self.attr_depth = 0
+                       # see comment in handle_starttag()
+                       if tag != 'br':
+                               self.depth -= 1
                if self.depth == 0:
                        self.parsing = False
 
        def handle_data(self, data):
-               if self.parsing:
-                       data = data.strip()
-                       if self.attr is not None and data:
-                               self.cur[self.attr] = data
+               if not self.parsing:
+                       return
+               data = data.strip()
+               # Hack to handle comments inside <script>/<style>, which don't
+               # end up in handle_comment(), so we just ignore those tags' contents
+               if self.in_script_style:
+                       return
+               if self.attr is not None and data:
+                       self.cur[self.attr] += ' ' + data
+                       if self.attr_depth == 0:
+                               self.cur[self.attr] = self.cur[self.attr].strip()
                                self.attr = None
-                               #self.cur[self.attr] = self.cur.get(self.attr, '') + data.strip()
-                               #setattr(self.cur, self.attr, data.strip())
-                       elif data in ('Downloads:', 'Cds:', 'Comentarios:',
-                                       'Formato:'):
-                               self.attr = data[:-1].lower()
-                       elif data == 'Subido por:':
-                               self.attr = 'autor'
-                       elif data == 'el':
-                               self.attr = 'fecha'
+                               self.attr_depth = 0
+               elif data in ('Downloads:', 'Cds:', 'Comentarios:', 'Formato:'):
+                       self.attr = data[:-1].lower()
+                       self.attr_depth = 0
+                       self.cur[self.attr] = ''
+               elif data == 'Subido por:':
+                       self.attr = 'autor'
+                       self.attr_depth = 0
+                       self.cur[self.attr] = ''
+               elif data == 'el':
+                       self.attr = 'fecha'
+                       self.attr_depth = 0
+                       self.cur[self.attr] = ''
+
+
+def filter_subtitles(subs, filters):
+       def is_good(sub, filter):
+               def is_any_good(sub, filter):
+                       for value in sub.values():
+                               if value.lower().find(filter) >= 0:
+                                       return True
+
+               field = None
+               if len(filter) > 2 and filter[1] == ':':
+                       field = filter[0]
+                       filter = filter[2:]
+               filter = filter.lower()
+
+               if field is None:
+                       return is_any_good(sub, filter)
+               elif field == 't':
+                       key = 'titulo'
+               elif field == 'd':
+                       key = 'desc'
+               elif field == 'a':
+                       key = 'autor'
+               elif field == 'f':
+                       key = 'formato'
+               elif field == 'c':
+                       key = 'comentarios'
+               elif field == 'C':
+                       key = 'cds'
+               elif field == 'F':
+                       key = 'fecha'
+               elif field == 'D':
+                       key = 'downloads'
+               else:
+                       # Not a recognized field identifier, use the raw filter
+                       return is_any_good(sub, field + ':' + filter)
+
+               return sub[key].lower().find(filter) >= 0
+
+       if not filters:
+               return subs
+
+       result = []
+       for sub in subs:
+               for filter in filters:
+                       if not is_good(sub, filter):
+                               break
+               else:
+                       result.append(sub)
+       return result
+
+
+def subdivx_get_subs(query_str):
+       page_number = 1
+       subs = []
+       while True:
+               query = SubDivXQuery(query_str, page_number)
+               url = urlopen(query.url)
+               parser = SubDivXHTMLParser(query.down_uri)
+
+               try:
+                       encoding = get_encoding(url.info())
+               except:
+                       encoding = 'ascii'
+
+               for line in url:
+                       parser.feed(line.decode(encoding))
 
+               url.close()
 
-def get_subs(query_str):
-       query = SubDivXQuery(query_str)
+               if not parser.subs:
+                       break
 
-       url = urllib.urlopen(query.url)
+               subs.extend(parser.subs)
+               page_number += 1
 
-       parser = SubDivXHTMLParser(query.down_uri)
+       return subs
 
-       for line in url:
-               parser.feed(line)
 
-       url.close()
+def unzip_subs(fname):
+       sub_exts = ('.srt', '.sub')
+       z = zipfile.ZipFile(fname, 'r')
+       z.printdir()
+       for fn in z.namelist():
+               if fn.endswith(sub_exts):
+                       if '..' in fn or fn.startswith('/'):
+                               error('Ignoring file with dangerous name: %s',
+                                               fn)
+                               continue
+                       echo('Extracting %s...', fn)
+                       z.extract(fn)
 
+
+def get_subs(query_str, filters):
+       global opts
        zip_exts = ('application/zip',)
        rar_exts = ('application/rar', 'application/x-rar-compressed')
 
-       for sub in sorted(parser.subs, key=lambda s: int(s['downloads']), reverse=True):
-               print '''\
-       - %(title)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
-         %(desc)s
-               DOWNLOADING ...
-       ''' % sub
-               fname, headers = urllib.urlretrieve(sub['url'])
+       subs = subdivx_get_subs(query_str)
+       subs = filter_subtitles(subs, filters)
+       subs.sort(key=lambda s: int(s['downloads']), reverse=True)
+
+       for sub in subs:
+               echo('''\
+- %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
+  %(desc)s
+       DOWNLOADING ...
+''', **sub)
+               if opts.list_only:
+                       continue
+               fname, headers = urlretrieve(sub['url'])
                if 'Content-Type' in headers:
                        if headers['Content-Type'] in zip_exts:
-                               z = zipfile.ZipFile(fname, 'r')
-                               z.printdir()
-                               for fn in z.namelist():
-                                       if fn.endswith('.srt') or fn.endswith('.sub'):
-                                               if '..' in fn or fn.startswith('/'):
-                                                       print 'Dangerous file name:', fn
-                                                       continue
-                                               print 'Extracting', fn, '...'
-                                               z.extract(fn)
+                               unzip_subs(fname)
                        elif headers['Content-Type'] in rar_exts:
                                if subprocess.call(['rar', 'x', fname]) != 0:
-                                       print 'Error unraring file %s' % fname
+                                       error('Error unraring file %s', fname)
                        else:
-                               print 'Unrecognized file type:', headers['Content-Type']
+                               error('Unrecognized file type: %s',
+                                               headers['Content-Type'])
                else:
-                       print 'No Content-Type!'
+                       error('No Content-Type!')
+
+
+def parse_args(argv):
+       from optparse import OptionParser
+       parser = OptionParser(usage="%prog [OPTIONS] QUERY [FILTER ...]",
+                       description="""
+Download subtitles from subdivx.com, searching for the string QUERY. If FILTERs
+are specified, only subtitles that match all those filters are downloaded.
+Filters have the format "X:filter", where X is a field specifier: t=titulo,
+d=desc, a=autor, f=formato, c=comentarios, C=cds, F=fecha and D=downloads.
+filter is a string that should appear in that field (case insensitive). If
+the field specifier is not recognized (or there isn't one), the filter string is
+searched for in all the fields.
+                       """.strip())
+       parser.add_option("-l", "--list-only",
+                       default=False, action='store_true',
+                       help="don't download the subtitles, just list them")
+       parser.add_option("-q", "--quiet",
+                       default=False, action='store_true',
+                       help="don't print progress messages")
+
+       (opts, args) = parser.parse_args()
+       if not args:
+              parser.error("Missing query string")
+
+       if opts.quiet and opts.list_only:
+               parser.error("Using --quiet and --list-only together doesn't "
+                               "make any sense")
+
+       return (args[0], args[1:], opts)
+
+(query_str, filters, opts) = parse_args(sys.argv)
 
+get_subs(query_str, filters)
 
-for q in sys.argv[1:]:
-       get_subs(q)
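For reference, a minimal sketch of how the new filter_subtitles() applies the
"X:filter" syntax described in the --help text above. The subtitle entries are
made up for illustration (not real subdivx.com data); the field names are the
ones collected by SubDivXHTMLParser, and filter_subtitles() from the script is
assumed to be in scope:

subs = [
    {'titulo': 'Some Movie 720p', 'desc': 'BluRay rip', 'autor': 'someone',
     'formato': 'SubRip', 'comentarios': '2', 'cds': '1',
     'fecha': '01/01/2012', 'downloads': '100'},
    {'titulo': 'Some Movie CAM', 'desc': 'cam rip', 'autor': 'other',
     'formato': 'SubRip', 'comentarios': '0', 'cds': '1',
     'fecha': '02/01/2012', 'downloads': '5'},
]
# 't:720p' is looked for only in the 'titulo' field; 'rip' has no known field
# prefix, so it is searched in every field.
print(filter_subtitles(subs, ['t:720p', 'rip']))
# -> only the first entry; the second has no '720p' in its 'titulo'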