X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;ds=sidebyside;f=Robots%2Fparse_html_beautifulsoup.py;h=e03dfce99faa55b980e21a75e38249790d50c0a7;hb=6688a7206d9b545e52cfbc4cf2adae691a1a0933;hp=c8e8f5db54af91c96a439584d09ade66b094de46;hpb=5c1c16d3e1a5fdf4ea1ecc6c31f5e4c6a4240bed;p=bookmarks_db.git

diff --git a/Robots/parse_html_beautifulsoup.py b/Robots/parse_html_beautifulsoup.py
index c8e8f5d..e03dfce 100644
--- a/Robots/parse_html_beautifulsoup.py
+++ b/Robots/parse_html_beautifulsoup.py
@@ -1,55 +1,130 @@
 """
    HTML Parser using BeautifulSoup
 
-   Written by BroytMann. Copyright (C) 2007 PhiloSoft Design
+   Written by Broytman. Copyright (C) 2007-2010 PhiloSoft Design
 """
 
-from HTMLParser import HTMLParser
-from BeautifulSoup import BeautifulSoup
+import re
+from sgmllib import SGMLParser, SGMLParseError
+from BeautifulSoup import BeautifulSoup, CData
+from parse_html_util import HTMLParser
 
 
-class BSoupParser(HTMLParser):
-    def __init__(self, charset, meta, title, refresh, icon):
-        object.__init__(self)
-        self.charset = charset
-        self.meta_charset = meta
-        self.title = title
-        self.refresh = refresh
-        self.icon = icon
+# http://groups.google.com/group/beautifulsoup/browse_thread/thread/69093cb0d3a3cf63
+class BadDeclParser(BeautifulSoup):
+    def parse_declaration(self, i):
+        """Treat a bogus SGML declaration as raw data. Treat a CDATA
+        declaration as a CData object."""
+        j = None
+        if self.rawdata[i:i+9] == '<![CDATA[':
+            k = self.rawdata.find(']]>', i)
+            if k == -1:
+                k = len(self.rawdata)
+            data = self.rawdata[i+9:k]
+            j = k+3
+            self._toStringSubclass(data, CData)
+        else:
+            try:
+                j = SGMLParser.parse_declaration(self, i)
+            except SGMLParseError:
+                # Could not parse the DOCTYPE declaration
+                # Try to just skip the actual declaration
+                match = re.search(r'<!DOCTYPE([^>]*?)>', self.rawdata[i:], re.MULTILINE|re.IGNORECASE)
+                if match:
+                    toHandle = self.rawdata[i:match.end()]
+                else:
+                    toHandle = self.rawdata[i:]
+                self.handle_data(toHandle)
+                j = i + len(toHandle)
+        return j
 
 
-def parse_html(filename, charset=None):
+def _parse_html(filename, charset):
     infile = open(filename, 'r')
-    root = BeautifulSoup(infile, fromEncoding=charset)
-    infile.close()
+    try:
+        return BadDeclParser(infile, fromEncoding=charset)
+    except TypeError:
+        return None
+    finally:
+        infile.close()
+
+def parse_html(filename, charset=None, log=None):
+    root = _parse_html(filename, charset)
+    if root is None:
+        return None
 
     _charset = root.originalEncoding
-    try:
-        title = root.html.head.title.string.encode(_charset)
-    except AttributeError:
-        title = ''
+    if _charset in ("ISO-8859-2", "windows-1252", "MacCyrillic"): # Replace default
+        _charset = DEFAULT_CHARSET
+        root = _parse_html(filename, _charset)
+        if root is None:
+            return None
 
-    try:
-        meta = root.html.head.find(_find_refresh, recursive=False)
-    except AttributeError:
-        refresh = None
+    html = root.html
+    if html is None:
+        html = root
+
+    head = html.head
+    if head is None:
+        head = html  # Some sites put TITLE in HTML without HEAD
+
+    title = head.title
+    if (title is None) and (html is not head):
+        # Some sites put TITLE in HTML outside of HEAD
+        title = html.title
+
+    if title is None:
+        # Lookup TITLE in the root
+        title = root.title
+
+    if title is None:
+        return None
+
+    if title.string:
+        title = title.string
     else:
-        if meta:
-            refresh = meta.get("content")
+        parts = []
+        for part in title:
+            if not isinstance(part, basestring):
+                part = unicode(part)
+            parts.append(part.strip())
+        title = ''.join(parts)
+
+    meta = head.find(_find_contenttype, recursive=False)
+    if meta:
+        try:
+            meta_content = meta.get("content")
+            if meta_content:
+                __charset = meta_content.lower().split('charset=')[1].split(';')[0]
+            else:
+                __charset = False
+        except IndexError:  # No charset in the META Content-Type
+            meta_charset = False
         else:
-            refresh = None
+            meta_charset = _charset == __charset
+    else:
+        meta_charset = False
 
-    try:
-        meta = root.html.head.find(_find_icon, recursive=False)
-    except AttributeError:
-        icon = None
+    if _charset or meta_charset:
+        title = title.encode(_charset or meta_charset)
+
+    meta = head.find(_find_refresh, recursive=False)
+    if meta:
+        refresh = meta.get("content")
     else:
-        if meta:
-            icon = meta.get("href")
-        else:
-            icon = None
+        refresh = None
 
-    return BSoupParser(_charset, _charset == charset, title, refresh, icon)
+    meta = head.find(_find_icon, recursive=False)
+    if meta:
+        icon = meta.get("href")
+    else:
+        icon = None
+
+    return HTMLParser(_charset, meta_charset, title, refresh, icon)
+
+def _find_contenttype(Tag):
+    return (Tag.name == "meta") and \
+           (Tag.get("http-equiv", '').lower() == "content-type")
 
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \