X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=Robots%2Fparse_html.py;h=228a3ceaddff6c0fe1acc8af64a374f348c5bb27;hb=52092194ea42dcece57ed93c2a2875cd2907564e;hp=e676504a9741765fd5f7bc98fb61cd9758b739a2;hpb=2ff00dae6cd629acb517fcb7223b6c17789e9136;p=bookmarks_db.git

diff --git a/Robots/parse_html.py b/Robots/parse_html.py
index e676504..228a3ce 100755
--- a/Robots/parse_html.py
+++ b/Robots/parse_html.py
@@ -2,7 +2,7 @@
 """
 HTML Parsers wrapper
 
-Written by BroytMann. Copyright (C) 1997-2008 PhiloSoft Design
+Written by Broytman. Copyright (C) 1997-2010 PhiloSoft Design
 """
 
 import codecs
@@ -11,6 +11,7 @@ universal_charset = "utf-8"
 DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
 
 parsers = []
+
 try:
     import parse_html_beautifulsoup
     parse_html_beautifulsoup.DEFAULT_CHARSET = DEFAULT_CHARSET
@@ -19,8 +20,26 @@ except ImportError:
 else:
     parsers.append(parse_html_beautifulsoup.parse_html)
 
-from parse_html_htmlparser import parse_html
-parsers.append(parse_html)
+try:
+    from parse_html_lxml import parse_html
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html)
+
+try:
+    from parse_html_htmlparser import parse_html
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html)
+
+try:
+    import parse_html_html5
+except ImportError:
+    pass
+else:
+    parsers.append(parse_html_html5.parse_html)
 
 import re
 
@@ -32,9 +51,11 @@ num_entity_re = re.compile("(&#[0-9]+;)")
 def recode_entities(title, charset):
     output = []
     for part in entity_re.split(title):
-        if part not in ("&amp;", "&lt;", "&gt;", "&quote;") and \
+        if part not in ("&amp;", "&lt;", "&gt;", "&quot;") and \
            entity_re.match(part):
-            part = unichr(name2codepoint.get(part[1:-1], part)).encode(charset)
+            _part = name2codepoint.get(part[1:-1], None)
+            if _part is not None:
+                part = unichr(_part).encode(charset)
         output.append(part)
     title = ''.join(output)
 
@@ -51,6 +72,9 @@ def parse_html(filename, charset=None, log=None):
+    if not parsers:
+        return None
+
     if charset:
         try:
             codecs.lookup(charset) # In case of unknown charset...
         except (ValueError, LookupError):
             charset = None # ...try charset from HTML
 
@@ -60,8 +84,9 @@
     charsets = [universal_charset, DEFAULT_CHARSET]
     if charset:
         charset = charset.lower().replace("windows-", "cp")
-        if charset not in charsets:
-            charsets.insert(0, charset)
+        if charset in charsets:
+            charsets.remove(charset)
+        charsets.insert(0, charset)
 
     for p in parsers:
         parser = None
@@ -76,8 +101,11 @@
         else:
             if log: log("Parser %s.%s failed, trying next one."
                 % (p.__module__, p.__name__))
+    if not parser:
+        return None
+
     converted_title = title = parser.title
-    if not parser.charset:
+    if title and (not parser.charset):
         try:
             unicode(title, "ascii")
         except UnicodeDecodeError:
@@ -86,7 +114,7 @@
     if parser.charset:
         parser.charset = parser.charset.lower().replace("windows-", "cp")
 
-    if parser.charset and (
+    if title and parser.charset and (
         (parser.charset <> universal_charset) or
         ((not charset) or (charset <> parser.charset))):
         try:
@@ -109,11 +137,12 @@
         else:
             if log: log(" title : %s" % title)
 
-    final_title = recode_entities(converted_title, universal_charset)
-    parts = [s.strip() for s in final_title.replace('\r', '').split('\n')]
-    final_title = ' '.join([s for s in parts if s])
-    if log and (final_title <> converted_title): log(" final title : %s" % final_title)
-    parser.title = final_title
+    if title:
+        final_title = recode_entities(converted_title, universal_charset)
+        parts = [s.strip() for s in final_title.replace('\r', '').split('\n')]
+        final_title = ' '.join([s for s in parts if s])
+        if log and (final_title <> converted_title): log(" final title : %s" % final_title)
+        parser.title = final_title
 
     return parser
 
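A minimal usage sketch, not part of the commit above: it assumes the caller runs next to the Robots/ modules so parse_html is importable, and the file name and log callback are made up for illustration. After this change parse_html() returns None when no parser backend could be imported or when every backend fails on the file, so callers are expected to check for that:

    # Hypothetical caller of the reworked wrapper (Python 2, like the module itself).
    from parse_html import parse_html

    def log(msg):
        print msg  # stand-in logger; the real robots pass their own

    parser = parse_html("test.html", charset="utf-8", log=log)
    if parser is None:
        print "no parser backend available, or all of them failed"
    else:
        print parser.title  # the (possibly empty) title, recoded to utf-8 and whitespace-normalized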