X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=parse_html%2Fbkmk_parse_html.py;h=922c745346ecfa8f8f9c2988ef94f85345dc167a;hb=d454f1d6aa7f2430d502d847693515f69489c66c;hp=5da37a98ab261f238f2e6cb78e29adeb826f0ebb;hpb=c2ea4e82718b903aa123dd77490f36657383b0ca;p=bookmarks_db.git

diff --git a/parse_html/bkmk_parse_html.py b/parse_html/bkmk_parse_html.py
index 5da37a9..922c745 100644
--- a/parse_html/bkmk_parse_html.py
+++ b/parse_html/bkmk_parse_html.py
@@ -12,20 +12,18 @@ __all__ = ['parse_html', 'parse_filename', 'universal_charset']
 
 
 import codecs
+import os
+import re
+try:
+    from html.entities import name2codepoint
+except ImportError:
+    from htmlentitydefs import name2codepoint
 
-universal_charset = "utf-8"
-DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
+from compat import unicode, unichr
 
+DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
 parsers = []
 
-try:
-    from . import bkmk_ph_beautifulsoup4
-except ImportError:
-    pass
-else:
-    bkmk_ph_beautifulsoup4.DEFAULT_CHARSET = DEFAULT_CHARSET
-    parsers.append(bkmk_ph_beautifulsoup4.parse_html)
-
 try:
     from . import bkmk_ph_beautifulsoup
 except ImportError:
@@ -35,40 +33,32 @@ else:
     parsers.append(bkmk_ph_beautifulsoup.parse_html)
 
 try:
-    from . import bkmk_ph_html5
+    from . import bkmk_ph_beautifulsoup4
 except ImportError:
     pass
 else:
-    parsers.append(bkmk_ph_html5.parse_html)
+    bkmk_ph_beautifulsoup4.DEFAULT_CHARSET = DEFAULT_CHARSET
+    parsers.append(bkmk_ph_beautifulsoup4.parse_html)
 
 try:
-    from . import bkmk_ph_lxml
+    from . import bkmk_ph_htmlparser
 except ImportError:
     pass
 else:
-    parsers.append(bkmk_ph_lxml.parse_html)
+    parsers.append(bkmk_ph_htmlparser.parse_html)
 
 try:
-    from . import bkmk_ph_htmlparser
+    from . import bkmk_ph_lxml
 except ImportError:
     pass
 else:
-    parsers.append(bkmk_ph_htmlparser.parse_html)
-
-# ElementTidy often segfaults
-#try:
-#    from . import bkmk_ph_etreetidy
-#except ImportError:
-#    pass
-#else:
-#    parsers.append(bkmk_ph_etreetidy.parse_html)
-
-import re
-from htmlentitydefs import name2codepoint
+    parsers.append(bkmk_ph_lxml.parse_html)
 
-entity_re = re.compile("(&\w+;)")
+universal_charset = "utf-8"
+entity_re = re.compile("(&\\w+;)")
 num_entity_re = re.compile("(&#[0-9]+;)")
 
+
 def recode_entities(title, charset):
     output = []
     for part in entity_re.split(title):
@@ -76,7 +66,7 @@
            entity_re.match(part):
             _part = name2codepoint.get(part[1:-1], None)
             if _part is not None:
-                part = unichr(_part).encode(charset)
+                part = unichr(_part)
         output.append(part)
 
     title = ''.join(output)
@@ -84,7 +74,7 @@
     for part in num_entity_re.split(title):
         if num_entity_re.match(part):
             try:
-                part = unichr(int(part[2:-1])).encode(charset)
+                part = unichr(int(part[2:-1]))
             except UnicodeEncodeError:
                 pass # Leave the entity as is
         output.append(part)
@@ -92,11 +82,11 @@
     return ''.join(output)
 
 
-import os
 BKMK_DEBUG_HTML_PARSERS = os.environ.get("BKMK_DEBUG_HTML_PARSERS")
 
+
 def parse_html(html_text, charset=None, log=None):
-    if not parsers:
+    if not html_text or not parsers:
         return None
 
     if charset:
@@ -144,28 +134,28 @@
         p, parser = _parsers[0]
         if log: log("   Using %s" % p.__module__)
 
-    title = parser.title
-    if isinstance(title, unicode):
-        if parser.charset:
-            parser.title = title.encode(parser.charset)
-        else:
-            try:
-                parser.title = title.encode('ascii')
-            except UnicodeEncodeError:
-                try:
-                    parser.title = title.encode(DEFAULT_CHARSET)
-                except UnicodeEncodeError:
-                    parser.title = title.encode(universal_charset)
-                    parser.charset = universal_charset
-                else:
-                    parser.charset = DEFAULT_CHARSET
-    else:
-        parser.charset = 'ascii'
+    #title = parser.title
+    #if isinstance(title, unicode):
+    #    if parser.charset:
+    #        parser.title = title.encode(parser.charset)
+    #    else:
+    #        try:
+    #            parser.title = title.encode('ascii')
+    #        except UnicodeEncodeError:
+    #            try:
+    #                parser.title = title.encode(DEFAULT_CHARSET)
+    #            except UnicodeEncodeError:
+    #                parser.title = title.encode(universal_charset)
+    #                parser.charset = universal_charset
+    #            else:
+    #                parser.charset = DEFAULT_CHARSET
+    #else:
+    #    parser.charset = 'ascii'
 
     converted_title = title = parser.title
-    if title and (not parser.charset):
+    if title and isinstance(title, bytes) and (not parser.charset):
         try:
-            unicode(title, "ascii")
+            title.decode("ascii")
         except UnicodeDecodeError:
             parser.charset = DEFAULT_CHARSET
 
@@ -180,16 +170,23 @@
             if log: log("   META charset   : %s" % parser.charset)
         elif (not charset) or (charset != parser.charset):
             if log: log("   guessed charset: %s" % parser.charset)
-        #if log: log("   current charset: %s" % universal_charset)
+        # if log: log("   current charset: %s" % universal_charset)
         if log: log("   title        : %s" % title)
-        if parser.charset != universal_charset:
-            try:
-                converted_title = unicode(title, parser.charset).encode(universal_charset)
-            except UnicodeError:
-                if log: log("   incorrect conversion from %s, converting from %s" % (parser.charset, DEFAULT_CHARSET))
-                converted_title = unicode(title, DEFAULT_CHARSET, "replace").encode(universal_charset, "replace")
-                parser.charset = DEFAULT_CHARSET
-        if log and (converted_title != title): log("   converted title: %s" % converted_title)
+        #if parser.charset != universal_charset:
+        #    try:
+        #        converted_title = title.decode(parser.charset).\
+        #            encode(universal_charset)
+        #    except UnicodeError:
+        #        if log:
+        #            log("   incorrect conversion from %s,"
+        #                "converting from %s"
+        #                % (parser.charset, DEFAULT_CHARSET))
+        #        converted_title = \
+        #            title.decode(DEFAULT_CHARSET, "replace").\
+        #            encode(universal_charset, "replace")
+        #        parser.charset = DEFAULT_CHARSET
+        #if log and (converted_title != title):
+        #    log("   converted title: %s" % converted_title)
     except LookupError:
         if log: log("   unknown charset: '%s'" % parser.charset)
     else:
@@ -199,20 +196,22 @@
     final_title = recode_entities(converted_title, universal_charset)
     parts = [s.strip() for s in final_title.replace('\r', '').split('\n')]
     final_title = ' '.join([s for s in parts if s])
-    if log and (final_title != converted_title): log("   final title    : %s" % final_title)
+    if log and (final_title != converted_title):
+        log("   final title    : %s" % final_title)
     parser.title = final_title
 
-    icon = parser.icon
-    if isinstance(icon, unicode):
-        try:
-            parser.icon = icon.encode('ascii')
-        except UnicodeEncodeError:
-            if parser.charset:
-                parser.icon = icon.encode(parser.charset)
+    #icon = parser.icon
+    #if isinstance(icon, unicode):
+    #    try:
+    #        parser.icon = icon.encode('ascii')
+    #    except UnicodeEncodeError:
+    #        if parser.charset:
+    #            parser.icon = icon.encode(parser.charset)
     return parser
 
+
 def parse_filename(filename, charset=None, log=None):
-    fp = open(filename, 'r')
+    fp = open(filename, 'rt', encoding=charset)
     try:
         parser = parse_html(fp.read(), charset=charset, log=log)
     finally:
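
Editor's note: the port above stops encoding recoded entities back to a byte charset; recode_entities now builds the title from text characters via unichr(). Below is a minimal, self-contained sketch of that entity-recoding idea, runnable on Python 2 and 3. It is not the repository code: the function name recode_entities_demo, the unichr = chr fallback and the ValueError guard are illustrative assumptions; only the two regexes mirror entity_re and num_entity_re from the diff.

import re
try:
    from html.entities import name2codepoint   # Python 3
except ImportError:
    from htmlentitydefs import name2codepoint  # Python 2

try:
    unichr           # Python 2 builtin
except NameError:
    unichr = chr     # Python 3: chr() already yields a text character

entity_re = re.compile("(&\\w+;)")         # named entities, e.g. &copy;
num_entity_re = re.compile("(&#[0-9]+;)")  # numeric entities, e.g. &#169;


def recode_entities_demo(title):
    # Pass 1: replace known named entities with the corresponding character.
    output = []
    for part in entity_re.split(title):
        if entity_re.match(part):
            codepoint = name2codepoint.get(part[1:-1])
            if codepoint is not None:
                part = unichr(codepoint)
        output.append(part)
    title = ''.join(output)

    # Pass 2: replace numeric entities; leave out-of-range ones untouched.
    output = []
    for part in num_entity_re.split(title):
        if num_entity_re.match(part):
            try:
                part = unichr(int(part[2:-1]))
            except ValueError:
                pass  # leave the entity as is
        output.append(part)
    return ''.join(output)


print(recode_entities_demo("Caf&eacute; &#955; &copy; 2024"))
# -> Café λ © 2024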