import HTMLParser
import urllib
class SubDivXQuery:
    """Builds the URLs for one page of search results on www.subdivx.com."""

    def __init__(self, to_search, page_number):
        self.host = "www.subdivx.com"
        self.page = "/index.php"
        self.down_page = "/bajar.php"
        # GET parameters understood by the site's search form.
        self.query = dict(
            buscar=to_search,
            pg=page_number,
            accion=5,
            masdesc='',
            subtitulos=1,
        )

    @property
    def url(self):
        # NOTE(review): reconstructed from usage — callers read `query.url`
        # as the full search URL; confirm the parameter encoding against
        # the live site.
        return 'http://%s%s?%s' % (self.host, self.page,
                                   urllib.urlencode(self.query))

    @property
    def down_uri(self):
        # A property, not a method: callers use this value directly as a
        # plain string prefix (e.g. href.startswith(down_uri)).
        return 'http://' + self.host + self.down_page
-class Subtitle:
- pass
class SubDivXHTMLParser(HTMLParser.HTMLParser):
    """Scrapes subtitle entries out of a subdivx.com search-results page.

    Each subtitle found is collected as a dict in self.subs, with keys
    such as 'titulo', 'desc', 'url', 'downloads', 'cds', 'comentarios',
    'formato', 'autor' and 'fecha'.
    """

    # Literal text labels on the page and the dict key each one announces.
    # The text chunk following a label becomes the value for that key.
    _DATA_MARKERS = {
        'Downloads:': 'downloads',
        'Cds:': 'cds',
        'Comentarios:': 'comentarios',
        'Formato:': 'formato',
        'Subido por:': 'autor',
        'el': 'fecha',
    }

    def __init__(self, down_uri):
        HTMLParser.HTMLParser.__init__(self)
        self.down_uri = down_uri      # URL prefix identifying download links
        self.depth = 0                # tag nesting depth inside an entry
        self.parsing = False          # currently inside a subtitle entry?
        self.subs = []                # all entries found so far
        self.attr = None              # dict key currently receiving text
        self.attr_depth = 0           # depth that opened self.attr
                                      # (0 = attr came from a data marker)
        self.cur = None               # dict for the entry being parsed
        self.in_script_style = False  # inside <script> or <style>?

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        # A new results entry starts at this div.
        if tag == 'div' and attrs.get('id') == 'menu_detalle_buscador':
            self.cur = dict()
            self.subs.append(self.cur)
            self.parsing = True
        if not self.parsing:
            return
        # Comments inside <script>/<style> never reach handle_comment(),
        # so flag the whole tag and have handle_data() skip its contents.
        if tag == 'script' or tag == 'style':
            self.in_script_style = True
            return
        if tag == 'div':
            if attrs.get('id') == 'buscador_detalle':
                self.parsing = True
            elif attrs.get('id') == 'buscador_detalle_sub':
                # Description text runs until this div closes.
                self.attr = 'desc'
                self.attr_depth = self.depth + 1
                self.cur[self.attr] = ''
        elif tag == 'a':
            if attrs.get('class') == 'titulo_menu_izq':
                # Title text runs until this anchor closes.
                self.attr = 'titulo'
                self.attr_depth = self.depth + 1
                self.cur[self.attr] = ''
            elif attrs.get('href', '').startswith(self.down_uri):
                self.cur['url'] = attrs['href']
        # <br> is usually not closed, so ignore it for depth tracking.
        if tag != 'br':
            self.depth += 1

    def handle_endtag(self, tag):
        if not self.parsing:
            return
        if tag == 'script' or tag == 'style':
            self.in_script_style = False
            return
        if self.attr is not None and self.depth == self.attr_depth:
            # Closing the tag that opened the current attribute: finish it.
            # Strip here too; chunks are accumulated with a leading space.
            self.cur[self.attr] = self.cur[self.attr].strip()
            self.attr = None
            self.attr_depth = 0
        # See the comment about <br> in handle_starttag().
        if tag != 'br':
            self.depth -= 1
        if self.depth == 0:
            self.parsing = False

    def handle_data(self, data):
        if not self.parsing:
            return
        data = data.strip()
        # Text inside <script>/<style> is ignored wholesale (see
        # handle_starttag()).
        if self.in_script_style:
            return
        if self.attr is not None and data:
            # Accumulate text for the attribute currently being captured;
            # chunks are joined with single spaces.
            self.cur[self.attr] += ' ' + data
            if self.attr_depth == 0:
                # Data-marker attributes take exactly one chunk of text.
                self.cur[self.attr] = self.cur[self.attr].strip()
                self.attr = None
        elif data in self._DATA_MARKERS:
            # A literal label announces the next chunk's meaning.
            self.attr = self._DATA_MARKERS[data]
            self.attr_depth = 0
            self.cur[self.attr] = ''
def subdivx_get_subs(query_str):
    """Fetch every page of subdivx.com results for *query_str*.

    Walks the paginated results until an empty page is returned, and
    returns the accumulated subtitle dicts sorted by download count,
    most-downloaded first.
    """
    page_number = 1
    subs = []
    while True:
        query = SubDivXQuery(query_str, page_number)
        url = urllib.urlopen(query.url)
        parser = SubDivXHTMLParser(query.down_uri)
        try:
            for line in url:
                parser.feed(line)
        finally:
            # Close the connection even if the parser chokes on a page.
            url.close()
        if not parser.subs:
            # An empty page means we ran past the last page of results.
            break
        subs.extend(parser.subs)
        page_number += 1
    return sorted(subs, key=lambda s: int(s['downloads']), reverse=True)
+
+def get_subs(query_str):
zip_exts = ('application/zip',)
rar_exts = ('application/rar', 'application/x-rar-compressed')
- for sub in sorted(parser.subs, key=lambda s: int(s['downloads']), reverse=True):
+ for sub in subdivx_get_subs(query_str):
print '''\
- - %(title)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
+ - %(titulo)s (%(autor)s - %(fecha)s - %(downloads)s - %(comentarios)s)
%(desc)s
DOWNLOADING ...
''' % sub