]> git.llucax.com Git - software/subdivxget.git/blobdiff - subdivxget
Add command line option to avoid downloading
[software/subdivxget.git] / subdivxget
index 38d46c40f41682ecaff6fc55d3ce595a7823427b..597110cbcb820cd0dc1d3d53fb25358eeda502ed 100755 (executable)
@@ -1,10 +1,33 @@
 #!/usr/bin/env python
 
 import sys
-import urllib
+if sys.version_info[0] < 3:
+       from HTMLParser import HTMLParser
+       from urllib import urlopen, urlretrieve, urlencode
+       def get_encoding(info):
+               return info.getparam('charset')
+
+else:
+       from html.parser import HTMLParser
+       from urllib.request import urlopen, urlretrieve
+       from urllib.parse import urlencode
+       def get_encoding(info):
+               return info.get_content_charset('ascii')
 import zipfile
 import subprocess
-import HTMLParser
+
+
+def output(fo, fmt, *args, **kargs):
+       if not args:
+               args = kargs
+       fo.write((fmt % args) + '\n')
+
+def echo(fmt, *args, **kargs):
+       output(sys.stdout, fmt, *args, **kargs)
+
+def error(fmt, *args, **kargs):
+       output(sys.stderr, fmt, *args, **kargs)
+
 
 class SubDivXQuery:
        def __init__(self, to_search, page_number):
@@ -22,22 +45,22 @@ class SubDivXQuery:
        @property
        def url(self):
                return 'http://%s%s?%s' % (self.host, self.page,
-                               urllib.urlencode(self.query))
+                               urlencode(self.query))
        @property
        def page_uri(self):
-               return self.page + '?' + urllib.urlencode(self.query)
+               return self.page + '?' + urlencode(self.query)
        @property
        def down_uri(self):
                return 'http://' + self.host + self.down_page
 
 
-class SubDivXHTMLParser(HTMLParser.HTMLParser):
+class SubDivXHTMLParser(HTMLParser):
 
        IDLE = 1
        HEADER = 2
 
        def __init__(self, down_uri):
-               HTMLParser.HTMLParser.__init__(self)
+               HTMLParser.__init__(self)
                self.down_uri = down_uri
                self.depth = 0
                self.parsing = False
@@ -173,11 +196,16 @@ def subdivx_get_subs(query_str):
        subs = []
        while True:
                query = SubDivXQuery(query_str, page_number)
-               url = urllib.urlopen(query.url)
+               url = urlopen(query.url)
                parser = SubDivXHTMLParser(query.down_uri)
 
+               try:
+                       encoding = get_encoding(url.info())
+               except:
+                       encoding = 'ascii'
+
                for line in url:
-                       parser.feed(line)
+                       parser.feed(line.decode(encoding))
 
                url.close()
 
@@ -187,42 +215,78 @@ def subdivx_get_subs(query_str):
                subs.extend(parser.subs)
                page_number += 1
 
-       return sorted(subs, key=lambda s: int(s['downloads']), reverse=True)
+       return subs
+
+
+def unzip_subs(fname):
+       sub_exts = ('.srt', '.sub')
+       z = zipfile.ZipFile(fname, 'r')
+       z.printdir()
+       for fn in z.namelist():
+               if fn.endswith(sub_exts):
+                       if '..' in fn or fn.startswith('/'):
+                               error('Ignoring file with dangerous name: %s',
+                                               fn)
+                               continue
+                       echo('Extracting %s...', fn)
+                       z.extract(fn)
 
 
 def get_subs(query_str, filters):
+       global opts
        zip_exts = ('application/zip',)
        rar_exts = ('application/rar', 'application/x-rar-compressed')
 
        subs = subdivx_get_subs(query_str)
        subs = filter_subtitles(subs, filters)
+       subs.sort(key=lambda s: int(s['downloads']), reverse=True)
 
        for sub in subs:
-               print '''\
+               echo('''\
 - %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
   %(desc)s
        DOWNLOADING ...
-''' % sub
-               fname, headers = urllib.urlretrieve(sub['url'])
+''', **sub)
+               if opts.list_only:
+                       continue
+               fname, headers = urlretrieve(sub['url'])
                if 'Content-Type' in headers:
                        if headers['Content-Type'] in zip_exts:
-                               z = zipfile.ZipFile(fname, 'r')
-                               z.printdir()
-                               for fn in z.namelist():
-                                       if fn.endswith('.srt') or fn.endswith('.sub'):
-                                               if '..' in fn or fn.startswith('/'):
-                                                       print 'Dangerous file name:', fn
-                                                       continue
-                                               print 'Extracting', fn, '...'
-                                               z.extract(fn)
+                               unzip_subs(fname)
                        elif headers['Content-Type'] in rar_exts:
                                if subprocess.call(['rar', 'x', fname]) != 0:
-                                       print 'Error unraring file %s' % fname
+                                       error('Error unraring file %s', fname)
                        else:
-                               print 'Unrecognized file type:', headers['Content-Type']
+                               error('Unrecognized file type:',
+                                               headers['Content-Type'])
                else:
-                       print 'No Content-Type!'
+                       error('No Content-Type!')
+
+
+def parse_args(argv):
+       from optparse import OptionParser
+       parser = OptionParser(usage="%prog [OPTIONS] QUERY [FILTER ...]",
+                       description="""
+Download subtitles from subdivx.com searching the string QUERY. If FILTERs are
+specified, only subtitles that match all those filters are downloaded.
+Filters have the format "X:filter", where X is a field specification: t=titulo,
+d=desc, a=autor, f=formato, c=comentarios, C=cds, F=fecha and D=downloads.
+filter is a string that should be found in that field (case insensitive). If
+the format specifier is not known (or there isn't one) the filter string is
+looked in all the fields.
+                       """.strip())
+       parser.add_option("-l", "--list-only",
+                       default=False, action='store_true',
+                       help="Don't download the subtitles, just list them")
+
+       (opts, args) = parser.parse_args()
+       if not args:
+              parser.error("Missing query string")
+
+       return (args[0], args[1:], opts)
+
+(query_str, filters, opts) = parse_args(sys.argv)
 
+get_subs(query_str, filters)
 
-get_subs(sys.argv[1], sys.argv[2:])