X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=Robots%2Fparse_html.py;h=cbb45d612f9b33699b6f2d0bbe397b11a3562fe1;hb=a788e94901728a5ea127b2c09e3a13ff6a5447b9;hp=80c7aa89d28efed06db8a598088303f44d2b338b;hpb=0e76f1851882b99da63a7c8a9e4cdf0c4a48657f;p=bookmarks_db.git

diff --git a/Robots/parse_html.py b/Robots/parse_html.py
index 80c7aa8..cbb45d6 100755
--- a/Robots/parse_html.py
+++ b/Robots/parse_html.py
@@ -2,7 +2,7 @@
 """
    HTML Parsers wrapper
 
-   Written by Broytman. Copyright (C) 1997-2008 PhiloSoft Design
+   Written by Broytman. Copyright (C) 1997-2011 PhiloSoft Design
 """
 
 import codecs
@@ -11,6 +11,7 @@ universal_charset = "utf-8"
 DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
 
 parsers = []
+
 try:
     import parse_html_beautifulsoup
     parse_html_beautifulsoup.DEFAULT_CHARSET = DEFAULT_CHARSET
@@ -19,9 +20,34 @@ except ImportError:
 else:
     parsers.append(parse_html_beautifulsoup.parse_html)
 
-from parse_html_htmlparser import parse_html
-parsers.append(parse_html)
+try:
+    from parse_html_lxml import parse_html
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html)
+
+try:
+    from parse_html_htmlparser import parse_html
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html)
+
+try:
+    import parse_html_html5
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html_html5.parse_html)
 
+# ElementTidy often segfaults
+#try:
+#    import parse_html_etreetidy
+#except ImportError:
+#    pass
+#else:
+#    parsers.append(parse_html_etreetidy.parse_html)
 
 import re
 from htmlentitydefs import name2codepoint
@@ -34,7 +60,9 @@ def recode_entities(title, charset):
     for part in entity_re.split(title):
         if part not in ("&amp;", "&lt;", "&gt;", "&quot;") and \
            entity_re.match(part):
-            part = unichr(name2codepoint.get(part[1:-1], part)).encode(charset)
+            _part = name2codepoint.get(part[1:-1], None)
+            if _part is not None:
+                part = unichr(_part).encode(charset)
         output.append(part)
     title = ''.join(output)
 
@@ -51,6 +79,9 @@
 
 
 def parse_html(filename, charset=None, log=None):
+    if not parsers:
+        return None
+
     if charset:
         try:
             codecs.lookup(charset) # In case of unknown charset...
@@ -60,8 +91,9 @@
     charsets = [universal_charset, DEFAULT_CHARSET]
     if charset:
         charset = charset.lower().replace("windows-", "cp")
-        if charset not in charsets:
-            charsets.insert(0, charset)
+        if charset in charsets:
+            charsets.remove(charset)
+        charsets.insert(0, charset)
 
     for p in parsers:
         parser = None
@@ -76,6 +108,9 @@
         else:
             if log: log("Parser %s.%s failed, trying next one." % (p.__module__, p.__name__))
 
+    if not parser:
+        return None
+
     converted_title = title = parser.title
     if title and (not parser.charset):
         try:
@@ -115,6 +150,14 @@
     final_title = ' '.join([s for s in parts if s])
     if log and (final_title <> converted_title): log(" final title : %s" % final_title)
     parser.title = final_title
+
+    icon = parser.icon
+    if isinstance(icon, unicode):
+        try:
+            parser.icon = icon.encode('ascii')
+        except UnicodeEncodeError:
+            if parser.charset:
+                parser.icon = icon.encode(parser.charset)
 
     return parser
 
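
Review note: a minimal usage sketch of the wrapper after this change, not part of the diff. It assumes the Robots directory is importable; the file name, charset value, and log callable below are illustrative placeholders. parse_html() returning None covers both failure paths added here: no parser module could be imported, or every available parser failed on the file.

# Usage sketch (assumptions noted above); Python 2, like the module itself.
from parse_html import parse_html

def log(message):
    print message  # parse_html() only needs a callable taking one string

parser = parse_html('saved_page.html', charset='windows-1251', log=log)
if parser is None:
    print 'no parser module available, or all parsers failed'
else:
    print parser.title    # entity-recoded, whitespace-normalized title
    print parser.charset  # charset the successful parser detected (may be empty)
    print parser.icon     # favicon (if any), byte-encoded by this change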