Group subtitle extensions in a variable
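The change relies on str.endswith() accepting a tuple of suffixes, so the two chained endswith() calls can be replaced by a single check against a sub_exts variable. A minimal standalone sketch of the idea (the file names are made up; sub_exts and fn mirror the names used in the diff):

    # endswith() accepts a tuple of suffixes, so the extensions can be
    # grouped in one variable instead of chaining two calls.
    sub_exts = ('.srt', '.sub')
    for fn in ('movie.srt', 'movie.rar', 'movie.sub'):
        if fn.endswith(sub_exts):
            print('subtitle file:', fn)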
diff --git a/subdivxget b/subdivxget
index 4ebcca5a71abf2bd94aa04b9c93b1003af36f240..954b80a641a5e11cc3ce017eb4ecf87e7e1b9d64 100755
--- a/subdivxget
+++ b/subdivxget
@@ -1,10 +1,20 @@
 #!/usr/bin/env python
 
 import sys
-import urllib
+if sys.version_info[0] < 3:
+       from HTMLParser import HTMLParser
+       from urllib import urlopen, urlretrieve, urlencode
+       def get_encoding(info):
+               return info.getparam('charset')
+
+else:
+       from html.parser import HTMLParser
+       from urllib.request import urlopen, urlretrieve
+       from urllib.parse import urlencode
+       def get_encoding(info):
+               return info.get_content_charset('ascii')
 import zipfile
 import subprocess
-import HTMLParser
 
 class SubDivXQuery:
        def __init__(self, to_search, page_number):
@@ -22,22 +32,22 @@ class SubDivXQuery:
        @property
        def url(self):
                return 'http://%s%s?%s' % (self.host, self.page,
-                               urllib.urlencode(self.query))
+                               urlencode(self.query))
        @property
        def page_uri(self):
-               return self.page + '?' + urllib.urlencode(self.query)
+               return self.page + '?' + urlencode(self.query)
        @property
        def down_uri(self):
                return 'http://' + self.host + self.down_page
 
 
-class SubDivXHTMLParser(HTMLParser.HTMLParser):
+class SubDivXHTMLParser(HTMLParser):
 
        IDLE = 1
        HEADER = 2
 
        def __init__(self, down_uri):
-               HTMLParser.HTMLParser.__init__(self)
+               HTMLParser.__init__(self)
                self.down_uri = down_uri
                self.depth = 0
                self.parsing = False
@@ -118,16 +128,71 @@ class SubDivXHTMLParser(HTMLParser.HTMLParser):
                        self.cur[self.attr] = ''
 
 
+def filter_subtitles(subs, filters):
+       def is_good(sub, filter):
+               def is_any_good(sub, filter):
+                       for value in sub.values():
+                               if value.lower().find(filter) >= 0:
+                                       return True
+
+               field = None
+               if len(filter) > 2 and filter[1] == ':':
+                       field = filter[0]
+                       filter = filter[2:]
+               filter = filter.lower()
+
+               if field is None:
+                       return is_any_good(sub, filter)
+               elif field == 't':
+                       key = 'titulo'
+               elif field == 'd':
+                       key = 'desc'
+               elif field == 'a':
+                       key = 'autor'
+               elif field == 'f':
+                       key = 'formato'
+               elif field == 'c':
+                       key = 'comentarios'
+               elif field == 'C':
+                       key = 'cds'
+               elif field == 'F':
+                       key = 'fecha'
+               elif field == 'D':
+                       key = 'downloads'
+               else:
+                       # Not a recognized field identifier, use the raw filter
+                       return is_any_good(sub, field + ':' + filter)
+
+               return sub[key].lower().find(filter) >= 0
+
+       if not filters:
+               return subs
+
+       result = []
+       for sub in subs:
+               for filter in filters:
+                       if not is_good(sub, filter):
+                               break
+               else:
+                       result.append(sub)
+       return result
+
+
 def subdivx_get_subs(query_str):
        page_number = 1
        subs = []
        while True:
                query = SubDivXQuery(query_str, page_number)
-               url = urllib.urlopen(query.url)
+               url = urlopen(query.url)
                parser = SubDivXHTMLParser(query.down_uri)
 
+               try:
+                       encoding = get_encoding(url.info())
+               except:
+                       encoding = 'ascii'
+
                for line in url:
-                       parser.feed(line)
+                       parser.feed(line.decode(encoding))
 
                url.close()
 
@@ -140,37 +205,41 @@ def subdivx_get_subs(query_str):
        return sorted(subs, key=lambda s: int(s['downloads']), reverse=True)
 
 
-def get_subs(query_str):
+def get_subs(query_str, filters):
+       sub_exts = ('.srt', '.sub')
        zip_exts = ('application/zip',)
        rar_exts = ('application/rar', 'application/x-rar-compressed')
 
-       for sub in subdivx_get_subs(query_str):
-               print '''\
-       - %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
-         %(desc)s
-               DOWNLOADING ...
-       ''' % sub
-               fname, headers = urllib.urlretrieve(sub['url'])
+       subs = subdivx_get_subs(query_str)
+       subs = filter_subtitles(subs, filters)
+
+       for sub in subs:
+               print('''\
+- %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
+  %(desc)s
+       DOWNLOADING ...
+''' % sub)
+               continue
+               fname, headers = urlretrieve(sub['url'])
                if 'Content-Type' in headers:
                        if headers['Content-Type'] in zip_exts:
                                z = zipfile.ZipFile(fname, 'r')
                                z.printdir()
                                for fn in z.namelist():
-                                       if fn.endswith('.srt') or fn.endswith('.sub'):
+                                       if fn.endswith(sub_exts):
                                                if '..' in fn or fn.startswith('/'):
-                                                       print 'Dangerous file name:', fn
+                                                       print('Dangerous file name:', fn)
                                                        continue
-                                               print 'Extracting', fn, '...'
+                                               print('Extracting', fn, '...')
                                                z.extract(fn)
                        elif headers['Content-Type'] in rar_exts:
                                if subprocess.call(['rar', 'x', fname]) != 0:
-                                       print 'Error unraring file %s' % fname
+                                       print('Error unraring file %s' % fname)
                        else:
-                               print 'Unrecognized file type:', headers['Content-Type']
+                               print('Unrecognized file type:', headers['Content-Type'])
                else:
-                       print 'No Content-Type!'
+                       print('No Content-Type!')
 
 
-for q in sys.argv[1:]:
-       get_subs(q)
+get_subs(sys.argv[1], sys.argv[2:])
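With this change the script no longer loops over several queries: it takes a single search query as the first argument and treats any remaining arguments as filters for filter_subtitles(). A filter may optionally be prefixed with a one-letter field selector and a colon (t: titulo, d: desc, a: autor, f: formato, c: comentarios, C: cds, F: fecha, D: downloads); without a prefix it is matched against every field. An illustrative invocation (the query and filter values are made up):

    ./subdivxget "some movie" a:someauthor f:dvdrip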