diff --git a/Robots/bkmk_rsimple.py b/Robots/bkmk_rsimple.py
index c34a127..2591588 100644
--- a/Robots/bkmk_rsimple.py
+++ b/Robots/bkmk_rsimple.py
@@ -5,8 +5,9 @@
 """
 
-import string, os
+import sys, os
 import time, urllib
+from base64 import b64encode
 from urlparse import urljoin
 from m_lib.net.www.util import parse_time
 from m_lib.md5wrapper import md5wrapper
@@ -20,6 +21,7 @@ class RedirectException(Exception):
     }
     def __init__(self, errcode, newurl):
         Exception.__init__(self, "(%s) to %s" % (self.reloc_dict[errcode], newurl))
+        self.url = newurl
 
 
 class MyURLopener(urllib.URLopener):
@@ -43,20 +45,30 @@ class MyURLopener(urllib.URLopener):
 
 urllib._urlopener = MyURLopener()
 
-# Some sites allow only Mozilla-compatible browsers; way to stop robots?
-server_version = "Mozilla/3.0 (compatible; Python-urllib/%s)" % urllib.__version__
-urllib._urlopener.addheaders[0] = ('User-agent', server_version)
+# Fake headers to pretend this is a real browser
+_version = "Links (2.1; Linux 2.6 i686; 80x25)"
+urllib._urlopener.addheaders[0] = ('User-Agent', _version)
+_version = "bookmarks_db (Python %d.%d.%d; urllib/%s)" % (
+    sys.version_info[0], sys.version_info[1], sys.version_info[2], urllib.__version__)
+urllib._urlopener.addheader('X-User-Agent', _version)
+
+urllib._urlopener.addheader('Connection', 'close')
+urllib._urlopener.addheader('Content-Length', '0')
+urllib._urlopener.addheader('Accept', '*/*')
+urllib._urlopener.addheader('Accept-Language', 'ru,en')
+urllib._urlopener.addheader('Cache-Control', 'max-age=300')
+urllib._urlopener.addheader('Referer', 'http://www.yahoo.com/')
 
 
 def get_error(msg):
-    if type(msg) == type(""):
+    if isinstance(msg, str):
         return msg
     else:
         s = []
         for i in msg:
-            s.append("'%s'" % string.join(string.split(str(i), "\n"), "\\n"))
-        return "(%s)" % string.join(s)
+            s.append("'%s'" % str(i).replace('\n', "\\n"))
+        return "(%s)" % ' '.join(s)
 
 
 urllib_ftpwrapper = urllib.ftpwrapper
@@ -66,7 +78,7 @@ class myftpwrapper(urllib_ftpwrapper):
     def __init__(self, user, passwd, host, port, dirs):
         urllib_ftpwrapper.__init__(self, user, passwd, host, port, dirs)
         global ftpcache_key
-        ftpcache_key = (user, host, port, string.join(dirs, '/'))
+        ftpcache_key = (user, host, port, tuple(dirs))
 
 urllib.ftpwrapper = myftpwrapper
@@ -82,13 +94,16 @@ from bkmk_objects import Robot
 from parse_html import parse_html
 
 class robot_simple(Robot):
-    def check_url(self, bookmark, url_type, url_rest):
+    def check_url(self, bookmark):
         if not self.tempfname:
             self.tempfname = bookmark.tempfname
 
         try:
             try:
                 self.start = int(time.time())
+                bookmark.icon = None
+
+                url_type, url_rest = urllib.splittype(bookmark.href)
                 url_host, url_path = urllib.splithost(url_rest)
                 url_path, url_tag = urllib.splittag(url_path)
 
@@ -135,15 +150,13 @@ class robot_simple(Robot):
                     content_type, charset = content_type.split(';')
                     content_type = content_type.strip()
                     charset = charset.split('=')[1].strip()
-                    if self.log: self.log(" HTTP charset : %s" % charset)
+                    self.log(" HTTP charset : %s" % charset)
                 except (ValueError, IndexError):
                     charset = None
-                    if self.log: self.log(" no charset in Content-Type header")
+                    self.log(" no charset in Content-Type header")
                 if content_type == "text/html":
                     parser = parse_html(fname, charset, self.log)
-                    title = parser.title.replace('\r', '').replace('\n', ' ').strip()
-                    bookmark.real_title = parser.unescape(title)
-                    if self.log: self.log(" final title : %s" % bookmark.real_title)
+                    bookmark.real_title = parser.title
                     if parser.refresh:
                         refresh = parser.refresh
                         try:
@@ -151,18 +164,51 @@ class robot_simple(Robot):
                         except IndexError:
                             url = "self"
                         try:
-                            timeout = int(refresh.split(';')[0])
+                            timeout = float(refresh.split(';')[0])
                         except (IndexError, ValueError):
-                            timeout = None
-                        if timeout is None:
                             raise RedirectException("html", "Bad redirect to %s (%s)" % (url, refresh))
                         else:
-                            raise RedirectException("html", "%s (%d sec)" % (url, timeout))
+                            try:
+                                timeout = int(refresh.split(';')[0])
+                            except ValueError:
+                                pass # float timeout
+                            raise RedirectException("html", "%s (%s sec)" % (url, timeout))
+
+                    # Get favicon.ico
                     icon = parser.icon
                     if not icon:
                         icon = "/favicon.ico"
-                    icon = urljoin("%s://%s" % (url_type, url_host), icon)
-                    if self.log: self.log(" icon : %s" % icon)
+                    icon = urljoin("%s://%s%s" % (url_type, url_host, url_path), icon)
+                    self.log(" looking for icon at: %s" % icon)
+                    try:
+                        for i in range(8):
+                            try:
+                                fname, headers = urllib.urlretrieve(icon)
+                            except RedirectException, e:
+                                icon = e.url
+                                self.log(" redirect to : %s" % icon)
+                            else:
+                                break
+                        else:
+                            raise IOError("Too many redirects")
+                    except:
+                        etype, emsg, tb = sys.exc_info()
+                        self.log(" no icon : %s %s" % (etype, emsg))
+                        etype = None
+                        emsg = None
+                        tb = None
+                    else:
+                        content_type = headers["Content-Type"]
+                        if content_type.startswith("image/"):
+                            icon_file = open(fname, "rb")
+                            icon = icon_file.read()
+                            icon_file.close()
+                            bookmark.icon = "data:%s;base64,%s" % (content_type, b64encode(icon))
+                            self.log(" got icon : %s" % content_type)
+                        else:
+                            self.log(" no icon : bad content type '%s'" % content_type)
+                        os.remove(fname)
+
             except KeyError: pass
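
For reference, a minimal standalone sketch of the icon-embedding idea the last hunk introduces: resolve the favicon URL against the page, download it, check that the Content-Type is an image, and store the bytes inline as a base64 "data:" URI. It is written in the same Python 2 / urllib style as the module, but the function name get_favicon_data_uri and the direct use of urllib.urlretrieve are illustrative only; the robot itself goes through its customized MyURLopener, turns HTTP redirects into RedirectException, and caps retries at 8.

    import os
    import urllib
    from base64 import b64encode
    from urlparse import urljoin

    def get_favicon_data_uri(page_url, icon_path="/favicon.ico"):
        # Resolve the icon relative to the page, as the diff does with urljoin().
        icon_url = urljoin(page_url, icon_path)
        fname, headers = urllib.urlretrieve(icon_url)
        try:
            content_type = headers["Content-Type"]
            if not content_type.startswith("image/"):
                return None  # mirrors the robot's "bad content type" branch
            icon_file = open(fname, "rb")
            try:
                icon = icon_file.read()
            finally:
                icon_file.close()
            # Inline the image so the saved bookmark needs no further network fetch.
            return "data:%s;base64,%s" % (content_type, b64encode(icon))
        finally:
            os.remove(fname)  # urlretrieve leaves a temporary file behind

A missing Content-Type header raises KeyError here, just as in the robot, where it is swallowed by the enclosing "except KeyError: pass".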