X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=Robots%2Fparse_html_beautifulsoup.py;h=47ecbaf459f367a9a80d535af1add172709dcf0d;hb=0e76f1851882b99da63a7c8a9e4cdf0c4a48657f;hp=4f395a16507e58d5e9a600de00b9056cb2aeb94b;hpb=d2499bf060be42a28feebde2e8bded52504ced95;p=bookmarks_db.git

diff --git a/Robots/parse_html_beautifulsoup.py b/Robots/parse_html_beautifulsoup.py
index 4f395a1..47ecbaf 100644
--- a/Robots/parse_html_beautifulsoup.py
+++ b/Robots/parse_html_beautifulsoup.py
@@ -1,13 +1,16 @@
 """
    HTML Parser using BeautifulSoup
 
-   Written by BroytMann. Copyright (C) 2007 PhiloSoft Design
+   Written by Broytman. Copyright (C) 2007, 2008 PhiloSoft Design
 """
 
-from BeautifulSoup import BeautifulSoup
+import re
+from sgmllib import SGMLParser, SGMLParseError
+from HTMLParser import HTMLParser
+from BeautifulSoup import BeautifulSoup, CData
 
 
-class DummyParser(object):
+class BSoupParser(HTMLParser):
     def __init__(self, charset, meta, title, refresh, icon):
         object.__init__(self)
         self.charset = charset
@@ -16,39 +19,113 @@ class BSoupParser(HTMLParser):
         self.refresh = refresh
         self.icon = icon
 
-def parse_html(filename, charset=None):
-    infile = open(filename, 'r')
-    root = BeautifulSoup(infile, fromEncoding=charset)
-    infile.close()
-    charset = root.originalEncoding
-    try:
-        title = root.html.head.title.string.encode(charset)
-    except AttributeError:
-        title = ''
+# http://groups.google.com/group/beautifulsoup/browse_thread/thread/69093cb0d3a3cf63
+class BadDeclParser(BeautifulSoup):
+    def parse_declaration(self, i):
+        """Treat a bogus SGML declaration as raw data. Treat a CDATA
+        declaration as a CData object."""
+        j = None
+        if self.rawdata[i:i+9] == '<![CDATA[':
+            k = self.rawdata.find(']]>', i)
+            if k == -1:
+                k = len(self.rawdata)
+            data = self.rawdata[i+9:k]
+            j = k+3
+            self._toStringSubclass(data, CData)
+        else:
+            try:
+                j = SGMLParser.parse_declaration(self, i)
+            except SGMLParseError:
+                # Could not parse the DOCTYPE declaration
+                # Try to just skip the actual declaration
+                match = re.search(r'<!DOCTYPE([^>]*?)>', self.rawdata[i:], re.MULTILINE|re.IGNORECASE)
+                if match:
+                    toHandle = self.rawdata[i:match.end()]
+                else:
+                    toHandle = self.rawdata[i:]
+                self.handle_data(toHandle)
+                j = i + len(toHandle)
+        return j
+
+def _parse_html(filename, charset):
+    infile = open(filename, 'r')
     try:
-        meta = root.html.head.find(_find_refresh, recursive=False)
-    except AttributeError:
-        refresh = None
-    else:
-        if meta:
-            refresh = meta.get("content")
+        return BadDeclParser(infile, fromEncoding=charset)
+    except TypeError:
+        return None
+    finally:
+        infile.close()
+
+def parse_html(filename, charset=None, log=None):
+    root = _parse_html(filename, charset)
+    if root is None:
+        return None
+
+    _charset = root.originalEncoding
+    if _charset in ("ISO-8859-2", "windows-1252", "MacCyrillic"): # Replace default
+        _charset = DEFAULT_CHARSET
+        root = _parse_html(filename, _charset)
+        if root is None:
+            return None
+
+    html = root.html
+    if html is None:
+        html = root
+
+    head = html.head
+    if head is None:
+        head = html # Some sites put TITLE in HTML without HEAD
+
+    title = head.title
+    if (title is None) and (html is not head):
+        # Some sites put TITLE in HTML outside of HEAD
+        title = html.title
+
+    if title is None:
+        # Lookup TITLE in the root
+        title = root.title
+
+    if title is not None:
+        if title.string:
+            title = title.string.encode(_charset)
         else:
-            refresh = None
+            parts = []
+            for part in title:
+                if not isinstance(part, basestring):
+                    part = unicode(part)
+                parts.append(part.strip())
+            title = ''.join(parts).encode(_charset)
 
-    try:
-        meta = root.html.head.find(_find_icon, recursive=False)
-    except AttributeError:
-        icon = None
-    else:
-        if meta:
-            icon = meta.get("href")
+    meta = head.find(_find_contenttype, recursive=False)
+    if meta:
+        try:
+            __charset = meta.get("content").lower().split('charset=')[1].split(';')[0]
+        except IndexError: # No charset in the META Content-Type
+            meta_charset = False
         else:
-            icon = None
+            meta_charset = _charset == __charset
+    else:
+        meta_charset = False
+
+    meta = head.find(_find_refresh, recursive=False)
+    if meta:
+        refresh = meta.get("content")
+    else:
+        refresh = None
+
+    meta = head.find(_find_icon, recursive=False)
+    if meta:
+        icon = meta.get("href")
+    else:
+        icon = None
+
+    return BSoupParser(_charset, meta_charset, title, refresh, icon)
 
-    parser = DummyParser(charset, False, title, refresh, icon)
-    return parser
+def _find_contenttype(Tag):
+    return (Tag.name == "meta") and \
+        (Tag.get("http-equiv", '').lower() == "content-type")
 
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \
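
For reference, a minimal usage sketch of the rewritten parser follows. It is an illustration, not part of the commit: it assumes Python 2 with BeautifulSoup 3.x installed, assumes DEFAULT_CHARSET is defined elsewhere in the module (the hunk uses it but does not define it), and uses a hypothetical file name and import path.

    # Hypothetical example: feed a saved HTML file to parse_html() and read
    # back the metadata collected in the returned BSoupParser instance.
    from parse_html_beautifulsoup import parse_html   # assumes cwd is Robots/

    parser = parse_html('page.html', charset='utf-8')   # charset may be None
    if parser is None:
        print 'could not parse the file'   # _parse_html returned None
    else:
        print parser.charset   # encoding BeautifulSoup settled on (or the override)
        print parser.meta      # True if a META Content-Type charset matched it
        print parser.title     # page title, already encoded in parser.charset
        print parser.refresh   # content of a META Refresh, if any
        print parser.icon      # href of the page icon link, if any

A None result mirrors the early returns in parse_html, so a caller can treat the page as unparsable instead of handling BeautifulSoup errors itself.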