X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=Robots%2Fbkmk_rsimple.py;h=ae7c39e481be8e30b144981bf18a1a8aa32b19d2;hb=b9b055b270749b6af5d83f1e6390793d7b951888;hp=58879b01bc3a76f33b059dd7e7f272bd8375900e;hpb=a8c8b37737468dc990f12724ec21dca1404a1557;p=bookmarks_db.git

diff --git a/Robots/bkmk_rsimple.py b/Robots/bkmk_rsimple.py
index 58879b0..ae7c39e 100644
--- a/Robots/bkmk_rsimple.py
+++ b/Robots/bkmk_rsimple.py
@@ -21,6 +21,7 @@ class RedirectException(Exception):
     }
     def __init__(self, errcode, newurl):
         Exception.__init__(self, "(%s) to %s" % (self.reloc_dict[errcode], newurl))
+        self.url = newurl
 
 
 class MyURLopener(urllib.URLopener):
@@ -44,8 +45,7 @@ class MyURLopener(urllib.URLopener):
 
 urllib._urlopener = MyURLopener()
 
-# Some sites allow only Mozilla-compatible browsers; way to stop robots?
-server_version = "Mozilla/3.0 (compatible; Python-urllib/%s)" % urllib.__version__
+server_version = "bookmarks_db (Python-urllib/%s)" % urllib.__version__
 urllib._urlopener.addheaders[0] = ('User-agent', server_version)
 
 
@@ -83,13 +83,16 @@ from bkmk_objects import Robot
 from parse_html import parse_html
 
 class robot_simple(Robot):
-    def check_url(self, bookmark, url_type, url_rest):
+    def check_url(self, bookmark):
         if not self.tempfname:
             self.tempfname = bookmark.tempfname
 
         try:
             try:
                 self.start = int(time.time())
+                bookmark.icon = None
+
+                url_type, url_rest = urllib.splittype(bookmark.href)
                 url_host, url_path = urllib.splithost(url_rest)
                 url_path, url_tag = urllib.splittag(url_path)
 
@@ -142,9 +145,7 @@ class robot_simple(Robot):
                     if self.log: self.log("   no charset in Content-Type header")
                 if content_type == "text/html":
                     parser = parse_html(fname, charset, self.log)
-                    title = parser.title.replace('\r', '').replace('\n', ' ').strip()
-                    bookmark.real_title = parser.unescape(title)
-                    if self.log: self.log("   final title    : %s" % bookmark.real_title)
+                    bookmark.real_title = parser.title
                     if parser.refresh:
                         refresh = parser.refresh
                         try:
@@ -152,22 +153,33 @@ class robot_simple(Robot):
                         except IndexError:
                             url = "self"
                         try:
-                            timeout = int(refresh.split(';')[0])
+                            timeout = float(refresh.split(';')[0])
                         except (IndexError, ValueError):
-                            timeout = None
-                        if timeout is None:
                             raise RedirectException("html", "Bad redirect to %s (%s)" % (url, refresh))
                         else:
-                            raise RedirectException("html", "%s (%d sec)" % (url, timeout))
+                            try:
+                                timeout = int(refresh.split(';')[0])
+                            except ValueError:
+                                pass # float timeout
+                            raise RedirectException("html", "%s (%s sec)" % (url, timeout))
 
                     # Get favicon.ico
                     icon = parser.icon
                     if not icon:
                         icon = "/favicon.ico"
                     icon = urljoin("%s://%s%s" % (url_type, url_host, url_path), icon)
-                    if self.log: self.log("   icon           : %s" % icon)
+                    if self.log: self.log("   looking for icon at : %s" % icon)
                     try:
-                        fname, headers = urllib.urlretrieve(icon)
+                        for i in range(8):
+                            try:
+                                fname, headers = urllib.urlretrieve(icon)
+                            except RedirectException, e:
+                                icon = e.url
+                                if self.log: self.log("   redirect to    : %s" % icon)
+                            else:
+                                break
+                        else:
+                            raise IOError("Too many redirects")
                     except:
                         etype, emsg, tb = sys.exc_info()
                         if self.log: self.log("   no icon        : %s %s" % (etype, emsg))
@@ -175,12 +187,16 @@ class robot_simple(Robot):
                         emsg = None
                         tb = None
                     else:
-                        icon_file = open(fname, "rb")
-                        icon = icon_file.read()
-                        icon_file.close()
-                        os.remove(fname)
                         content_type = headers["Content-Type"]
-                        bookmark.icon = "data:%s;base64,%s" % (content_type, b64encode(icon))
+                        if content_type.startswith("image/"):
+                            icon_file = open(fname, "rb")
+                            icon = icon_file.read()
+                            icon_file.close()
+                            bookmark.icon = "data:%s;base64,%s" % (content_type, b64encode(icon))
+                            if self.log: self.log("   got icon       : %s" % content_type)
+                        else:
+                            if self.log: self.log("   no icon        : bad content type %s" % content_type)
+                        os.remove(fname)
 
             except KeyError:
                 pass