import HTMLParser
class SubDivXQuery:
- def __init__(self, to_search):
+ def __init__(self, to_search, page_number):
self.host = "www.subdivx.com"
self.page = "/index.php"
self.down_page = "/bajar.php"
self.query = dict(
buscar = to_search,
+ pg = page_number,
accion = 5,
masdesc = '',
subtitulos = 1,
def down_uri(self):
return 'http://' + self.host + self.down_page
-class Subtitle:
- pass
class SubDivXHTMLParser(HTMLParser.HTMLParser):
self.parsing = False
self.subs = []
self.attr = None
+ self.attr_depth = 0
+ self.cur = None
+ self.in_script_style = False
def handle_starttag(self, tag, attrs):
attrs = dict(attrs)
if tag == 'div' and attrs.get('id') == 'menu_detalle_buscador':
- #self.cur = Subtitle()
self.cur = dict()
self.subs.append(self.cur)
self.parsing = True
if not self.parsing:
return
+ if tag == 'script' or tag == 'style':
+ self.in_script_style = True
+ return
if tag == 'div':
if attrs.get('id') == 'buscador_detalle':
self.parsing = True
elif attrs.get('id') == 'buscador_detalle_sub':
self.attr = 'desc'
+ self.attr_depth = self.depth + 1
+ self.cur[self.attr] = ''
elif tag == 'a':
if attrs.get('class') == 'titulo_menu_izq':
- self.attr = 'title'
+ self.attr = 'titulo'
+ self.attr_depth = self.depth + 1
+ self.cur[self.attr] = ''
elif attrs.get('href', '').startswith(self.down_uri):
self.cur['url'] = attrs['href']
- if self.parsing:
+ # br are usually not closed, so ignore them in depth calculation
+ if self.parsing and tag != 'br':
self.depth += 1
def handle_endtag(self, tag):
if self.parsing:
- self.depth -= 1
+ if tag == 'script' or tag == 'style':
+ self.in_script_style = False
+ return
+ if self.depth == self.attr_depth:
+ self.attr = None
+ self.attr_depth = 0
+ # see comment in handle_starttag()
+ if tag != 'br':
+ self.depth -= 1
if self.depth == 0:
self.parsing = False
def handle_data(self, data):
- if self.parsing:
- data = data.strip()
- if self.attr is not None and data:
- self.cur[self.attr] = data
+ if not self.parsing:
+ return
+ data = data.strip()
+ # Hack to handle comments in <script> <style> which don't end
+ # up in handle_comment(), so we just ignore the whole tags
+ if self.in_script_style:
+ return
+ if self.attr is not None and data:
+ self.cur[self.attr] += ' ' + data
+ if self.attr_depth == 0:
+ self.cur[self.attr] = self.cur[self.attr].strip()
self.attr = None
- #self.cur[self.attr] = self.cur.get(self.attr, '') + data.strip()
- #setattr(self.cur, self.attr, data.strip())
- elif data in ('Downloads:', 'Cds:', 'Comentarios:',
- 'Formato:'):
- self.attr = data[:-1].lower()
- elif data == 'Subido por:':
- self.attr = 'autor'
- elif data == 'el':
- self.attr = 'fecha'
+ self.attr_depth = 0
+ elif data in ('Downloads:', 'Cds:', 'Comentarios:', 'Formato:'):
+ self.attr = data[:-1].lower()
+ self.attr_depth = 0
+ self.cur[self.attr] = ''
+ elif data == 'Subido por:':
+ self.attr = 'autor'
+ self.attr_depth = 0
+ self.cur[self.attr] = ''
+ elif data == 'el':
+ self.attr = 'fecha'
+ self.attr_depth = 0
+ self.cur[self.attr] = ''
+
+
def filter_subtitles(subs, filters):
    """Return the subtitles in `subs` that match every filter in `filters`.

    Each filter is either a plain substring searched case-insensitively
    in every field of a subtitle, or a "<field>:<text>" expression where
    <field> is a one-letter field identifier:

        t: titulo, d: desc, a: autor, f: formato, c: comentarios,
        C: cds, F: fecha, D: downloads

    An unrecognized field identifier is treated as part of a plain
    substring filter. An empty `filters` list matches everything.
    """
    # One-letter field identifier -> subtitle dict key (case-sensitive).
    field_keys = {
        't': 'titulo',
        'd': 'desc',
        'a': 'autor',
        'f': 'formato',
        'c': 'comentarios',
        'C': 'cds',
        'F': 'fecha',
        'D': 'downloads',
    }

    def is_any_good(sub, text):
        # Case-insensitive substring search over every field of the subtitle.
        for value in sub.values():
            if text in value.lower():
                return True
        return False

    def is_good(sub, spec):
        # Split an optional "<field>:" prefix off the search text.
        field = None
        if len(spec) > 2 and spec[1] == ':':
            field = spec[0]
            spec = spec[2:]
        spec = spec.lower()

        if field is None:
            return is_any_good(sub, spec)
        key = field_keys.get(field)
        if key is None:
            # Not a recognized field identifier, use the raw filter
            return is_any_good(sub, field + ':' + spec)
        return spec in sub[key].lower()

    if not filters:
        return subs

    return [sub for sub in subs
            if all(is_good(sub, spec) for spec in filters)]
+
+
def subdivx_get_subs(query_str):
    """Fetch every search-result page from subdivx.com for `query_str`.

    Walks the paginated results until an empty page is returned and
    collects all parsed subtitles, sorted by download count (most
    downloaded first).
    """
    page_number = 1
    subs = []
    while True:
        query = SubDivXQuery(query_str, page_number)
        url = urllib.urlopen(query.url)
        try:
            parser = SubDivXHTMLParser(query.down_uri)
            for line in url:
                parser.feed(line)
        finally:
            # Close the connection even if feed() chokes on a page.
            url.close()

        # An empty page means we walked past the last results page.
        if not parser.subs:
            break

        subs.extend(parser.subs)
        page_number += 1
    # 'downloads' is scraped as text; compare numerically, biggest first.
    return sorted(subs, key=lambda s: int(s['downloads']), reverse=True)
+
+
+def get_subs(query_str, filters):
zip_exts = ('application/zip',)
rar_exts = ('application/rar', 'application/x-rar-compressed')
- for sub in sorted(parser.subs, key=lambda s: int(s['downloads']), reverse=True):
+ subs = subdivx_get_subs(query_str)
+ subs = filter_subtitles(subs, filters)
+
+ for sub in subs:
print '''\
- - %(title)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
- %(desc)s
- DOWNLOADING ...
- ''' % sub
+- %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
+ %(desc)s
+ DOWNLOADING ...
+''' % sub
fname, headers = urllib.urlretrieve(sub['url'])
if 'Content-Type' in headers:
if headers['Content-Type'] in zip_exts:
print 'No Content-Type!'
if __name__ == '__main__':
    if len(sys.argv) < 2:
        # Avoid an IndexError traceback when run without a search query.
        sys.exit('usage: %s <query> [filter ...]' % sys.argv[0])
    get_subs(sys.argv[1], sys.argv[2:])