X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=Robots%2Fbkmk_rsimple.py;h=14a3f5804799365ebd0db637f8ecb04a3b593df9;hb=5ec101e58df6499cf2fe7ee38ead9e5a31cb59eb;hp=e2f4dadc1358ecee16a37be3709ca007d75d4a1f;hpb=387f77d110986aa12967c9cd788ab0e4f41f2be2;p=bookmarks_db.git

diff --git a/Robots/bkmk_rsimple.py b/Robots/bkmk_rsimple.py
index e2f4dad..14a3f58 100644
--- a/Robots/bkmk_rsimple.py
+++ b/Robots/bkmk_rsimple.py
@@ -1,10 +1,18 @@
 """
-   Simple, strightforward robot; guaranteed to has problems with timeouts :)
+   Simple, straightforward robot
 
-   Written by BroytMann, Mar 2000 - Aug 2002. Copyright (C) 2000-2002 PhiloSoft Design
+   Written by Oleg BroytMann. Copyright (C) 2000-2007 PhiloSoft Design.
 """
 
 
+import sys, os
+import time, urllib
+from base64 import b64encode
+from urlparse import urljoin
+from m_lib.net.www.util import parse_time
+from m_lib.md5wrapper import md5wrapper
+
+
 class RedirectException(Exception):
     reloc_dict = {
         301: "perm.",
@@ -13,12 +21,7 @@ class RedirectException(Exception):
     }
     def __init__(self, errcode, newurl):
         Exception.__init__(self, "(%s) to %s" % (self.reloc_dict[errcode], newurl))
-
-
-import string, os
-import time, urllib
-from m_lib.net.www.util import parse_time
-from m_lib.md5wrapper import md5wrapper
+        self.url = newurl
 
 
 class MyURLopener(urllib.URLopener):
@@ -42,20 +45,30 @@ class MyURLopener(urllib.URLopener):
 
 urllib._urlopener = MyURLopener()
 
-# Some sites allow only Mozilla-compatible browsers; way to stop robots?
-server_version = "Mozilla/3.0 (compatible; Python-urllib/%s)" % urllib.__version__
-urllib._urlopener.addheaders[0] = ('User-agent', server_version)
+# Fake headers to pretend this is a real browser
+_version = "Links (2.1; Linux 2.6 i686; 80x25)"
+urllib._urlopener.addheaders[0] = ('User-Agent', _version)
+_version = "bookmarks_db (Python %d.%d.%d; urllib/%s)" % (
+    sys.version_info[0], sys.version_info[1], sys.version_info[2], urllib.__version__)
+urllib._urlopener.addheader('X-User-Agent', _version)
+
+urllib._urlopener.addheader('Connection', 'close')
+urllib._urlopener.addheader('Content-Length', '0')
+urllib._urlopener.addheader('Accept', '*/*')
+urllib._urlopener.addheader('Accept-Language', 'ru,en')
+urllib._urlopener.addheader('Cache-Control', 'max-age=300')
+urllib._urlopener.addheader('Referer', 'http://www.yahoo.com/')
 
 
 def get_error(msg):
-    if type(msg) == type(""):
+    if isinstance(msg, str):
         return msg
     else:
         s = []
         for i in msg:
-            s.append("'%s'" % string.join(string.split(str(i), "\n"), "\\n"))
-        return "(%s)" % string.join(s)
+            s.append("'%s'" % str(i).replace('\n', "\\n"))
+        return "(%s)" % ' '.join(s)
 
 
 urllib_ftpwrapper = urllib.ftpwrapper
@@ -65,7 +78,7 @@ class myftpwrapper(urllib_ftpwrapper):
     def __init__(self, user, passwd, host, port, dirs):
         urllib_ftpwrapper.__init__(self, user, passwd, host, port, dirs)
         global ftpcache_key
-        ftpcache_key = (user, host, port, string.join(dirs, '/'))
+        ftpcache_key = (user, host, port, tuple(dirs))
 
 urllib.ftpwrapper = myftpwrapper
 
@@ -81,17 +94,22 @@ from bkmk_objects import Robot
 from parse_html import parse_html
 
 class robot_simple(Robot):
-    def check_url(self, bookmark, url_type, url_rest):
+    def check_url(self, bookmark):
         if not self.tempfname:
             self.tempfname = bookmark.tempfname
 
         try:
             try:
                 self.start = int(time.time())
+                bookmark.icon = None
+
+                url_type, url_rest = urllib.splittype(bookmark.href)
                 url_host, url_path = urllib.splithost(url_rest)
                 url_path, url_tag = urllib.splittag(url_path)
 
+                if bookmark.charset: urllib._urlopener.addheader('Accept-Charset', bookmark.charset)
                 fname, headers = urllib.urlretrieve("%s://%s%s" % (url_type, url_host, url_path), self.tempfname)
+                if bookmark.charset: del urllib._urlopener.addheaders[-1]
 
                 size = 0
                 last_modified = None
@@ -138,20 +156,59 @@ class robot_simple(Robot):
                     if self.log: self.log(" no charset in Content-Type header")
                 if content_type == "text/html":
                     parser = parse_html(fname, charset, self.log)
-                    title = parser.title.replace('\r', '').replace('\n', ' ').strip()
-                    bookmark.real_title = parser.unescape(title)
-                    if self.log: self.log(" final title : %s" % bookmark.real_title)
+                    bookmark.real_title = parser.title
                     if parser.refresh:
                         refresh = parser.refresh
-                        try:
-                            timeout = int(refresh.split(';')[0])
-                        except (IndexError, ValueError):
-                            timeout = "ERROR"
                         try:
                             url = refresh.split('=', 1)[1]
                         except IndexError:
                             url = "self"
-                        raise RedirectException("html", "%s (%d sec)" % (url, timeout))
+                        try:
+                            timeout = float(refresh.split(';')[0])
+                        except (IndexError, ValueError):
+                            raise RedirectException("html", "Bad redirect to %s (%s)" % (url, refresh))
+                        else:
+                            try:
+                                timeout = int(refresh.split(';')[0])
+                            except ValueError:
+                                pass # float timeout
+                            raise RedirectException("html", "%s (%s sec)" % (url, timeout))
+
+                    # Get favicon.ico
+                    icon = parser.icon
+                    if not icon:
+                        icon = "/favicon.ico"
+                    icon = urljoin("%s://%s%s" % (url_type, url_host, url_path), icon)
+                    if self.log: self.log(" looking for icon at: %s" % icon)
+                    try:
+                        for i in range(8):
+                            try:
+                                fname, headers = urllib.urlretrieve(icon)
+                            except RedirectException, e:
+                                icon = e.url
+                                if self.log: self.log(" redirect to : %s" % icon)
+                            else:
+                                break
+                        else:
+                            raise IOError("Too many redirects")
+                    except:
+                        etype, emsg, tb = sys.exc_info()
+                        if self.log: self.log(" no icon : %s %s" % (etype, emsg))
+                        etype = None
+                        emsg = None
+                        tb = None
+                    else:
+                        content_type = headers["Content-Type"]
+                        if content_type.startswith("image/"):
+                            icon_file = open(fname, "rb")
+                            icon = icon_file.read()
+                            icon_file.close()
+                            bookmark.icon = "data:%s;base64,%s" % (content_type, b64encode(icon))
+                            if self.log: self.log(" got icon : %s" % content_type)
+                        else:
+                            if self.log: self.log(" no icon : bad content type '%s'" % content_type)
+                        os.remove(fname)
+
 
             except KeyError:
                 pass
@@ -180,7 +237,7 @@ class robot_simple(Robot):
     def finish_check_url(self, bookmark):
         # Calculate these attributes even in case of an error
         if os.path.exists(self.tempfname):
-            size = str(os.stat(self.tempfname).st_size)
+            size = str(os.path.getsize(self.tempfname))
             if size[-1] == 'L':
                 size = size[:-1]
             bookmark.size = size
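
Editor's note: the core of the favicon feature this commit adds is resolving the icon's href against the page URL, downloading it, and storing it on the bookmark as a base64 "data:" URI. Below is a minimal standalone sketch of that flow under the same Python 2 APIs the diff uses (urllib.urlretrieve, urlparse.urljoin, base64.b64encode); the page URL and icon path are hypothetical, and the redirect-retry loop and error handling of the real code are omitted.

    # Minimal Python 2 sketch; the URLs are hypothetical, not from the commit.
    import os
    import urllib
    from base64 import b64encode
    from urlparse import urljoin

    page_url = "http://example.com/index.html"  # hypothetical page being checked
    icon_href = "/favicon.ico"                  # fallback when the page names no icon

    # Resolve the icon relative to the page and download it to a temp file.
    icon_url = urljoin(page_url, icon_href)
    fname, headers = urllib.urlretrieve(icon_url)

    # Like the diff, this assumes the server sent a Content-Type header.
    content_type = headers["Content-Type"]
    if content_type.startswith("image/"):
        icon_file = open(fname, "rb")
        data_uri = "data:%s;base64,%s" % (content_type, b64encode(icon_file.read()))
        icon_file.close()
        print data_uri[:60]
    os.remove(fname)  # clean up the temp file either way

Embedding the icon as a data: URI keeps the bookmark database self-contained: the icon travels with the bookmark record and needs no separate file storage or re-fetching when the bookmarks are rendered.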