X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=parse_html%2Fbkmk_ph_beautifulsoup.py;h=a2f57157db0347d71592d6732259b380972e8001;hb=d9360788a641b5b1184a2523881e950bab7d7c66;hp=437f67b73fba1b511a0a4111cc2557b9e8facfde;hpb=71712390f4edb041609ff7bc9272d12a5c1a9b1d;p=bookmarks_db.git

diff --git a/parse_html/bkmk_ph_beautifulsoup.py b/parse_html/bkmk_ph_beautifulsoup.py
index 437f67b..a2f5715 100644
--- a/parse_html/bkmk_ph_beautifulsoup.py
+++ b/parse_html/bkmk_ph_beautifulsoup.py
@@ -1,10 +1,11 @@
 """HTML Parser using BeautifulSoup
 
 This file is a part of Bookmarks database and Internet robot.
+
 """
 
 __author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2007-2012 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2007-2014 PhiloSoft Design"
 __license__ = "GNU GPL"
 
 __all__ = ['parse_html']
@@ -46,24 +47,21 @@ class BadDeclParser(BeautifulSoup):
         return j
 
 
-def _parse_html(filename, charset):
-    infile = open(filename, 'r')
+def _parse_html(html_text, charset):
     try:
-        return BadDeclParser(infile, fromEncoding=charset)
+        return BadDeclParser(html_text, fromEncoding=charset)
     except TypeError:
         return None
-    finally:
-        infile.close()
 
-def parse_html(filename, charset=None, log=None):
-    root = _parse_html(filename, charset)
+def parse_html(html_text, charset=None, log=None):
+    root = _parse_html(html_text, charset)
     if root is None:
         return None
     _charset = root.originalEncoding
     if _charset in ("ISO-8859-2", "windows-1252", "MacCyrillic"):
         # Replace default
         _charset = DEFAULT_CHARSET
-        root = _parse_html(filename, _charset)
+        root = _parse_html(html_text, _charset)
         if root is None:
             return None
 
@@ -84,18 +82,16 @@ def parse_html(filename, charset=None, log=None):
 
     # Lookup TITLE in the root
     title = root.title
-    if title is None:
-        return None
-
-    if title.string:
-        title = title.string
-    else:
-        parts = []
-        for part in title:
-            if not isinstance(part, basestring):
-                part = unicode(part)
-            parts.append(part.strip())
-        title = ''.join(parts)
+    if title is not None:
+        if title.string:
+            title = title.string
+        else:
+            parts = []
+            for part in title:
+                if not isinstance(part, basestring):
+                    part = unicode(part)
+                parts.append(part.strip())
+            title = ''.join(parts)
 
     meta = head.find(_find_contenttype, recursive=False)
     if meta:
@@ -112,7 +108,14 @@
     else:
         meta_charset = False
 
-    if _charset or meta_charset:
+    if not meta_charset:
+        meta = head.find(_find_charset, recursive=False)
+        if meta:
+            meta_content = meta.get("charset")
+            if meta_content:
+                meta_charset = _charset = meta_content.lower()
+
+    if title and (_charset or meta_charset):
         title = title.encode(_charset or meta_charset)
 
     meta = head.find(_find_refresh, recursive=False)
@@ -127,12 +130,17 @@
     else:
         icon = None
 
+    if (title is None) and (refresh is None) and (icon is None):
+        return None
     return HTMLParser(_charset, meta_charset, title, refresh, icon)
 
 def _find_contenttype(Tag):
     return (Tag.name == "meta") and \
         (Tag.get("http-equiv", '').lower() == "content-type")
 
+def _find_charset(Tag):
+    return (Tag.name == "meta") and Tag.get("charset", '')
+
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \
         (Tag.get("http-equiv", '').lower() == "refresh")
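
The diff above changes parse_html()/_parse_html() to take the already-read page text instead of a file name, recognizes HTML5-style <meta charset="..."> declarations in addition to http-equiv="Content-Type", and only returns None when no title, refresh and icon were found. A minimal usage sketch follows; the import path, the file name and the attribute access are assumptions made for illustration, not code taken from bookmarks_db:

    # Illustrative sketch (Python 2, like the module itself).
    # parse_html() no longer opens files, so the caller reads the text first.
    from parse_html.bkmk_ph_beautifulsoup import parse_html  # assumed import path

    infile = open("saved_page.html")  # hypothetical local copy of a page
    try:
        html_text = infile.read()
    finally:
        infile.close()

    parser = parse_html(html_text, charset="utf-8")
    if parser is None:
        # With this commit, None means "no title, no refresh, no icon found",
        # not "no <title> tag" as before.
        pass
    else:
        # Attribute names are assumed to mirror the positional arguments of
        # HTMLParser(_charset, meta_charset, title, refresh, icon) in the diff.
        page_title = parser.title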