Cache icons.
diff --git a/Robots/bkmk_rsimple.py b/Robots/bkmk_rsimple.py
index c34a12728d136307da774e5a6ac086622bf51515..96135824be74d83867458e0b0af73caf2df5687a 100644
--- a/Robots/bkmk_rsimple.py
+++ b/Robots/bkmk_rsimple.py
@@ -4,13 +4,17 @@
    Written by Oleg BroytMann. Copyright (C) 2000-2007 PhiloSoft Design.
 """
 
-
-import string, os
+import sys, os
 import time, urllib
+from base64 import b64encode
 from urlparse import urljoin
+
 from m_lib.net.www.util import parse_time
 from m_lib.md5wrapper import md5wrapper
 
+from bkmk_objects import Robot
+from parse_html import parse_html
+
 
 class RedirectException(Exception):
    reloc_dict = {
@@ -20,6 +24,7 @@ class RedirectException(Exception):
    }
    def __init__(self, errcode, newurl):
       Exception.__init__(self, "(%s) to %s" % (self.reloc_dict[errcode], newurl))
+      self.url = newurl
 
 
 class MyURLopener(urllib.URLopener):
@@ -43,20 +48,29 @@ class MyURLopener(urllib.URLopener):
 
 urllib._urlopener = MyURLopener()
 
-# Some sites allow only Mozilla-compatible browsers; way to stop robots?
-server_version = "Mozilla/3.0 (compatible; Python-urllib/%s)" % urllib.__version__
-urllib._urlopener.addheaders[0] = ('User-agent', server_version)
+# Fake headers to pretend this is a real browser
+_version = "Mozilla/5.0 (X11; U; Linux 2.6 i686; en) Gecko/20001221 Firefox/2.0.0"
+urllib._urlopener.addheaders[0] = ('User-Agent', _version)
+_version = "bookmarks_db (Python %d.%d.%d; urllib/%s)" % (
+   sys.version_info[0], sys.version_info[1], sys.version_info[2], urllib.__version__)
+urllib._urlopener.addheader('X-User-Agent', _version)
+urllib._urlopener.addheader('Referer', '')
+
+urllib._urlopener.addheader('Connection', 'close')
+urllib._urlopener.addheader('Accept', '*/*')
+urllib._urlopener.addheader('Accept-Language', 'ru,en')
+urllib._urlopener.addheader('Cache-Control', 'max-age=300')
 
 
 def get_error(msg):
-   if type(msg) == type(""):
+   if isinstance(msg, str):
       return msg
 
    else:
       s = []
       for i in msg:
-         s.append("'%s'" % string.join(string.split(str(i), "\n"), "\\n"))
-      return "(%s)" % string.join(s)
+         s.append("'%s'" % str(i).replace('\n', "\\n"))
+      return "(%s)" % ' '.join(s)
 
 
 urllib_ftpwrapper = urllib.ftpwrapper
@@ -66,7 +80,7 @@ class myftpwrapper(urllib_ftpwrapper):
    def __init__(self, user, passwd, host, port, dirs):
       urllib_ftpwrapper.__init__(self, user, passwd, host, port, dirs)
       global ftpcache_key
-      ftpcache_key = (user, host, port, string.join(dirs, '/'))
+      ftpcache_key = (user, host, port, tuple(dirs))
 
 urllib.ftpwrapper = myftpwrapper
 
@@ -78,20 +92,26 @@ def get_welcome():
    return _welcome
 
 
-from bkmk_objects import Robot
-from parse_html import parse_html
+icons = {} # Icon cache; maps an icon URL to a tuple (content type, data URI)
+           # or None if there is no icon.
 
 class robot_simple(Robot):
-   def check_url(self, bookmark, url_type, url_rest):
+   def check_url(self, bookmark):
       if not self.tempfname:
          self.tempfname = bookmark.tempfname
 
       try:
          try:
             self.start = int(time.time())
+            bookmark.icon = None
+
+            url_type, url_rest = urllib.splittype(bookmark.href)
             url_host, url_path = urllib.splithost(url_rest)
             url_path, url_tag  = urllib.splittag(url_path)
 
+            # Set a fake Referer header to the page's own URL
+            urllib._urlopener.addheaders[2] = ('Referer', "%s://%s%s" % (url_type, url_host, url_path))
+
             if bookmark.charset: urllib._urlopener.addheader('Accept-Charset', bookmark.charset)
             fname, headers = urllib.urlretrieve("%s://%s%s" % (url_type, url_host, url_path), self.tempfname)
             if bookmark.charset: del urllib._urlopener.addheaders[-1]
@@ -135,15 +155,13 @@ class robot_simple(Robot):
                      content_type, charset = content_type.split(';')
                      content_type = content_type.strip()
                      charset = charset.split('=')[1].strip()
-                     if self.log: self.log("   HTTP charset   : %s" % charset)
+                     self.log("   HTTP charset   : %s" % charset)
                   except (ValueError, IndexError):
                      charset = None
-                     if self.log: self.log("   no charset in Content-Type header")
+                     self.log("   no charset in Content-Type header")
                   if content_type == "text/html":
                      parser = parse_html(fname, charset, self.log)
-                     title = parser.title.replace('\r', '').replace('\n', ' ').strip()
-                     bookmark.real_title = parser.unescape(title)
-                     if self.log: self.log("   final title    : %s" % bookmark.real_title)
+                     bookmark.real_title = parser.title
                      if parser.refresh:
                         refresh = parser.refresh
                         try:
@@ -151,18 +169,62 @@ class robot_simple(Robot):
                         except IndexError:
                            url = "self"
                         try:
-                           timeout = int(refresh.split(';')[0])
+                           timeout = float(refresh.split(';')[0])
                         except (IndexError, ValueError):
-                           timeout = None
-                        if timeout is None:
                            raise RedirectException("html", "Bad redirect to %s (%s)" % (url, refresh))
                         else:
-                           raise RedirectException("html", "%s (%d sec)" % (url, timeout))
+                           try:
+                              timeout = int(refresh.split(';')[0])
+                           except ValueError:
+                              pass # float timeout
+                           raise RedirectException("html", "%s (%s sec)" % (url, timeout))
+
+                     # Get favicon.ico
                      icon = parser.icon
                      if not icon:
                         icon = "/favicon.ico"
-                     icon = urljoin("%s://%s" % (url_type, url_host), icon)
-                     if self.log: self.log("   icon           : %s" % icon)
+                     icon = urljoin("%s://%s%s" % (url_type, url_host, url_path), icon)
+                     self.log("   looking for icon at: %s" % icon)
+                     if icon in icons:
+                        if icons[icon]:
+                           content_type, bookmark.icon = icons[icon]
+                           self.log("       cached icon: %s" % content_type)
+                        else:
+                           self.log("       cached icon: no icon")
+                     else:
+                        try:
+                           _icon = icon
+                           for i in range(8):
+                              try:
+                                 fname, headers = urllib.urlretrieve(_icon)
+                              except RedirectException, e:
+                                 _icon = e.url
+                                 self.log("       redirect to : %s" % _icon)
+                              else:
+                                 break
+                           else:
+                              raise IOError("Too many redirects")
+                        except:
+                           etype, emsg, tb = sys.exc_info()
+                           self.log("   no icon        : %s %s" % (etype, emsg))
+                           etype = None
+                           emsg = None
+                           tb = None
+                           icons[icon] = None
+                        else:
+                           content_type = headers["Content-Type"]
+                           if content_type.startswith("image/"):
+                              icon_file = open(fname, "rb")
+                              icon_data = icon_file.read()
+                              icon_file.close()
+                              bookmark.icon = "data:%s;base64,%s" % (content_type, b64encode(icon_data))
+                              self.log("   got icon       : %s" % content_type)
+                              icons[icon] = (content_type, bookmark.icon)
+                           else:
+                              self.log("   no icon        : bad content type '%s'" % content_type)
+                              icons[icon] = None
+                           os.remove(fname)
+
                except KeyError:
                   pass
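
For reference, the icon cache introduced by this patch maps each icon URL either to a (content type, data URI) pair or to None when no icon could be fetched, so both successes and failures are remembered across bookmarks. Below is a minimal standalone sketch of that scheme in the same Python 2 style as the module; icon_cache, get_cached_icon and fetch_icon are hypothetical names for illustration only, not part of the patch.

# Sketch only: URL -> (content type, data URI) cache, None for a known failure.
from base64 import b64encode

icon_cache = {}

def get_cached_icon(url, fetch_icon):
   # fetch_icon(url) is assumed to return (content_type, raw_bytes) or raise IOError
   if url in icon_cache:
      return icon_cache[url]  # hit: either a cached icon or a cached failure (None)
   try:
      content_type, data = fetch_icon(url)
   except IOError:
      icon_cache[url] = None  # cache the failure so the URL is not retried
      return None
   data_uri = "data:%s;base64,%s" % (content_type, b64encode(data))
   icon_cache[url] = (content_type, data_uri)
   return icon_cache[url]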