X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=parse_html%2Fbkmk_ph_beautifulsoup.py;h=a2f57157db0347d71592d6732259b380972e8001;hb=00549b1c0622ee6ed0ac12249097cf4562bc486e;hp=0b3d2f9c58ac0291dd92533c7241e896920ce036;hpb=463fac5e388966a8ce95fc84c1c92f5a62afbc9b;p=bookmarks_db.git

diff --git a/parse_html/bkmk_ph_beautifulsoup.py b/parse_html/bkmk_ph_beautifulsoup.py
index 0b3d2f9..a2f5715 100644
--- a/parse_html/bkmk_ph_beautifulsoup.py
+++ b/parse_html/bkmk_ph_beautifulsoup.py
@@ -1,13 +1,11 @@
 """HTML Parser using BeautifulSoup
 
 This file is a part of Bookmarks database and Internet robot.
+
 """
 
-__version__ = "$Revision$"[11:-2]
-__revision__ = "$Id$"[5:-2]
-__date__ = "$Date$"[7:-2]
 __author__ = "Oleg Broytman "
-__copyright__ = "Copyright (C) 2007-2011 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2007-2014 PhiloSoft Design"
 __license__ = "GNU GPL"
 
 __all__ = ['parse_html']
@@ -49,24 +47,21 @@ class BadDeclParser(BeautifulSoup):
         return j
 
 
-def _parse_html(filename, charset):
-    infile = open(filename, 'r')
+def _parse_html(html_text, charset):
     try:
-        return BadDeclParser(infile, fromEncoding=charset)
+        return BadDeclParser(html_text, fromEncoding=charset)
     except TypeError:
         return None
-    finally:
-        infile.close()
 
-def parse_html(filename, charset=None, log=None):
-    root = _parse_html(filename, charset)
+def parse_html(html_text, charset=None, log=None):
+    root = _parse_html(html_text, charset)
     if root is None:
         return None
 
     _charset = root.originalEncoding
     if _charset in ("ISO-8859-2", "windows-1252", "MacCyrillic"): # Replace default
         _charset = DEFAULT_CHARSET
-        root = _parse_html(filename, _charset)
+        root = _parse_html(html_text, _charset)
         if root is None:
             return None
 
@@ -87,18 +82,16 @@ def parse_html(filename, charset=None, log=None):
 
     # Lookup TITLE in the root
     title = root.title
-    if title is None:
-        return None
-
-    if title.string:
-        title = title.string
-    else:
-        parts = []
-        for part in title:
-            if not isinstance(part, basestring):
-                part = unicode(part)
-            parts.append(part.strip())
-        title = ''.join(parts)
+    if title is not None:
+        if title.string:
+            title = title.string
+        else:
+            parts = []
+            for part in title:
+                if not isinstance(part, basestring):
+                    part = unicode(part)
+                parts.append(part.strip())
+            title = ''.join(parts)
 
     meta = head.find(_find_contenttype, recursive=False)
     if meta:
@@ -115,7 +108,14 @@ def parse_html(filename, charset=None, log=None):
     else:
         meta_charset = False
 
-    if _charset or meta_charset:
+    if not meta_charset:
+        meta = head.find(_find_charset, recursive=False)
+        if meta:
+            meta_content = meta.get("charset")
+            if meta_content:
+                meta_charset = _charset = meta_content.lower()
+
+    if title and (_charset or meta_charset):
         title = title.encode(_charset or meta_charset)
 
     meta = head.find(_find_refresh, recursive=False)
@@ -130,12 +130,17 @@ def parse_html(filename, charset=None, log=None):
     else:
         icon = None
 
+    if (title is None) and (refresh is None) and (icon is None):
+        return None
     return HTMLParser(_charset, meta_charset, title, refresh, icon)
 
 def _find_contenttype(Tag):
     return (Tag.name == "meta") and \
        (Tag.get("http-equiv", '').lower() == "content-type")
 
+def _find_charset(Tag):
+    return (Tag.name == "meta") and Tag.get("charset", '')
+
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \
        (Tag.get("http-equiv", '').lower() == "refresh")
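
A minimal caller sketch, not part of this commit: after this change parse_html() receives the HTML text itself instead of a file name, so opening, reading and closing the file moves to the caller. The helper name parse_html_file and the import path are assumptions for illustration only.

    # Hypothetical caller; assumes the parse_html directory is importable
    # as a package.
    from parse_html.bkmk_ph_beautifulsoup import parse_html

    def parse_html_file(filename, charset=None):
        infile = open(filename, 'r')
        try:
            # Read the raw HTML; parse_html() no longer opens files itself.
            html_text = infile.read()
        finally:
            infile.close()
        # Returns None if parsing failed or the page has neither a title,
        # a refresh meta tag nor an icon link.
        return parse_html(html_text, charset=charset)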