From 338c964afba3651bd8fe6318644c0fcabb66cc3b Mon Sep 17 00:00:00 2001
From: Oleg Broytman
Date: Wed, 5 Jan 2011 19:01:21 +0000
Subject: [PATCH] Renamed parse_html modules to bkmk_ph_* to avoid name clashes.

git-svn-id: file:///home/phd/archive/SVN/bookmarks_db/trunk@320 fdd5c36f-1aea-0310-aeeb-c58d7e2b6c23
---
 parse_html/__init__.py                        | 163 +----------------
 parse_html/bkmk_parse_html.py                 | 173 ++++++++++++++++++
 ...utifulsoup.py => bkmk_ph_beautifulsoup.py} |   2 +-
 .../{etreetidy.py => bkmk_ph_etreetidy.py}    |   2 +-
 parse_html/{html5.py => bkmk_ph_html5.py}     |   2 +-
 .../{htmlparser.py => bkmk_ph_htmlparser.py}  |   0
 parse_html/{lxml.py => bkmk_ph_lxml.py}       |   2 +-
 parse_html/{util.py => bkmk_ph_util.py}       |   0
 8 files changed, 180 insertions(+), 164 deletions(-)
 create mode 100644 parse_html/bkmk_parse_html.py
 rename parse_html/{beautifulsoup.py => bkmk_ph_beautifulsoup.py} (99%)
 rename parse_html/{etreetidy.py => bkmk_ph_etreetidy.py} (98%)
 rename parse_html/{html5.py => bkmk_ph_html5.py} (98%)
 rename parse_html/{htmlparser.py => bkmk_ph_htmlparser.py} (100%)
 rename parse_html/{lxml.py => bkmk_ph_lxml.py} (97%)
 rename parse_html/{util.py => bkmk_ph_util.py} (100%)

diff --git a/parse_html/__init__.py b/parse_html/__init__.py
index ec8fcec..17800d0 100644
--- a/parse_html/__init__.py
+++ b/parse_html/__init__.py
@@ -13,167 +13,10 @@ __license__ = "GNU GPL"
 
 __all__ = ['parse_html']
 
-import codecs
+from .bkmk_parse_html import parse_html, universal_charset
 
-universal_charset = "utf-8"
-DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
-
-parsers = []
-
-try:
-    from . import beautifulsoup
-except ImportError:
-    pass
-else:
-    beautifulsoup.DEFAULT_CHARSET = DEFAULT_CHARSET
-    parsers.append(beautifulsoup.parse_html)
-
-try:
-    from .lxml import parse_html
-except ImportError:
-    pass
-else:
-    parsers.append(parse_html)
-
-try:
-    from .htmlparser import parse_html
-except ImportError:
-    pass
-else:
-    parsers.append(parse_html)
-
-try:
-    from . import html5
-except ImportError:
-    pass
-else:
-    parsers.append(html5.parse_html)
-
-# ElementTidy often segfaults
-#try:
-#    from . import etreetidy
-#except ImportError:
-#    pass
-#else:
-#    parsers.append(etreetidy.parse_html)
-
-import re
-from htmlentitydefs import name2codepoint
-
-entity_re = re.compile("(&\w+;)")
-num_entity_re = re.compile("(&#[0-9]+;)")
-
-def recode_entities(title, charset):
-    output = []
-    for part in entity_re.split(title):
-        if part not in ("&amp;", "&lt;", "&gt;", "&quot;") and \
-           entity_re.match(part):
-            _part = name2codepoint.get(part[1:-1], None)
-            if _part is not None:
-                part = unichr(_part).encode(charset)
-        output.append(part)
-    title = ''.join(output)
-
-    output = []
-    for part in num_entity_re.split(title):
-        if num_entity_re.match(part):
-            try:
-                part = unichr(int(part[2:-1])).encode(charset)
-            except UnicodeEncodeError:
-                pass # Leave the entity as is
-        output.append(part)
-
-    return ''.join(output)
-
-
-def parse_html(filename, charset=None, log=None):
-    if not parsers:
-        return None
-
-    if charset:
-        try:
-            codecs.lookup(charset) # In case of unknown charset...
-        except (ValueError, LookupError):
-            charset = None # ...try charset from HTML
-
-    charsets = [universal_charset, DEFAULT_CHARSET]
-    if charset:
-        charset = charset.lower().replace("windows-", "cp")
-        if charset in charsets:
-            charsets.remove(charset)
-        charsets.insert(0, charset)
-
-    for p in parsers:
-        parser = None
-        for c in charsets:
-            try:
-                parser = p(filename, c, log)
-            except UnicodeEncodeError:
-                pass
-            else:
-                break
-        if parser:
-            break
-        else:
-            if log: log("Parser %s.%s failed, trying next one." % (p.__module__, p.__name__))
-
-    if not parser:
-        if log: log("All parser has failed.")
-        return None
-
-    if log: log("Using %s.%s" % (p.__module__, p.__name__))
-
-    converted_title = title = parser.title
-    if title and (not parser.charset):
-        try:
-            unicode(title, "ascii")
-        except UnicodeDecodeError:
-            parser.charset = DEFAULT_CHARSET
-
-    if parser.charset:
-        parser.charset = parser.charset.lower().replace("windows-", "cp")
-
-    if title and parser.charset and (
-          (parser.charset <> universal_charset) or
-          ((not charset) or (charset <> parser.charset))):
-        try:
-            if parser.meta_charset:
-                if log: log("   META charset   : %s" % parser.charset)
-            elif (not charset) or (charset <> parser.charset):
-                if log: log("   guessed charset: %s" % parser.charset)
-            if log: log("   current charset: %s" % universal_charset)
-            if log: log("   title          : %s" % title)
-            if parser.charset <> universal_charset:
-                try:
-                    converted_title = unicode(title, parser.charset).encode(universal_charset)
-                except UnicodeError:
-                    if log: log("   incorrect conversion from %s, converting from %s" % (parser.charset, DEFAULT_CHARSET))
-                    converted_title = unicode(title, DEFAULT_CHARSET, "replace").encode(universal_charset, "replace")
-                    parser.charset = DEFAULT_CHARSET
-            if log and (converted_title <> title): log("   converted title: %s" % converted_title)
-        except LookupError:
-            if log: log("   unknown charset: '%s'" % parser.charset)
-    else:
-        if log: log("   title          : %s" % title)
-
-    if title:
-        final_title = recode_entities(converted_title, universal_charset)
-        parts = [s.strip() for s in final_title.replace('\r', '').split('\n')]
-        final_title = ' '.join([s for s in parts if s])
-        if log and (final_title <> converted_title): log("   final title    : %s" % final_title)
-        parser.title = final_title
-
-    icon = parser.icon
-    if isinstance(icon, unicode):
-        try:
-            parser.icon = icon.encode('ascii')
-        except UnicodeEncodeError:
-            if parser.charset:
-                parser.icon = icon.encode(parser.charset)
-    return parser
-
-
-def test():
+def main():
     import sys
 
     l = len(sys.argv)
@@ -184,7 +27,7 @@ def test():
         filename = sys.argv[1]
         charset = universal_charset
     else:
-        sys.exit("Usage: %s filename [charset]" % sys.argv[0])
+        sys.exit("Usage: main filename [charset]")
 
     parser = parse_html(filename, charset, log=lambda s: sys.stdout.write(s + '\n'))
     print " refresh:", parser.refresh
diff --git a/parse_html/bkmk_parse_html.py b/parse_html/bkmk_parse_html.py
new file mode 100644
index 0000000..f42dab8
--- /dev/null
+++ b/parse_html/bkmk_parse_html.py
@@ -0,0 +1,173 @@
+"""HTML Parsers
+
+This file is a part of Bookmarks database and Internet robot.
+"""
+
+__version__ = "$Revision$"[11:-2]
+__revision__ = "$Id$"[5:-2]
+__date__ = "$Date$"[7:-2]
+__author__ = "Oleg Broytman"
+__copyright__ = "Copyright (C) 1997-2011 PhiloSoft Design"
+__license__ = "GNU GPL"
+
+__all__ = ['parse_html', 'universal_charset']
+
+
+import codecs
+
+universal_charset = "utf-8"
+DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
+
+parsers = []
+
+try:
+    from . import bkmk_ph_beautifulsoup
+except ImportError:
+    pass
+else:
+    bkmk_ph_beautifulsoup.DEFAULT_CHARSET = DEFAULT_CHARSET
+    parsers.append(bkmk_ph_beautifulsoup.parse_html)
+
+try:
+    from . import bkmk_ph_lxml
+except ImportError:
+    pass
+else:
+    parsers.append(bkmk_ph_lxml.parse_html)
+
+try:
+    from . import bkmk_ph_htmlparser
+except ImportError:
+    pass
+else:
+    parsers.append(bkmk_ph_htmlparser.parse_html)
+
+try:
+    from . import bkmk_ph_html5
+except ImportError:
+    pass
+else:
+    parsers.append(bkmk_ph_html5.parse_html)
+
+# ElementTidy often segfaults
+#try:
+#    from . import bkmk_ph_etreetidy
+#except ImportError:
+#    pass
+#else:
+#    parsers.append(bkmk_ph_etreetidy.parse_html)
+
+import re
+from htmlentitydefs import name2codepoint
+
+entity_re = re.compile("(&\w+;)")
+num_entity_re = re.compile("(&#[0-9]+;)")
+
+def recode_entities(title, charset):
+    output = []
+    for part in entity_re.split(title):
+        if part not in ("&amp;", "&lt;", "&gt;", "&quot;") and \
+           entity_re.match(part):
+            _part = name2codepoint.get(part[1:-1], None)
+            if _part is not None:
+                part = unichr(_part).encode(charset)
+        output.append(part)
+    title = ''.join(output)
+
+    output = []
+    for part in num_entity_re.split(title):
+        if num_entity_re.match(part):
+            try:
+                part = unichr(int(part[2:-1])).encode(charset)
+            except UnicodeEncodeError:
+                pass # Leave the entity as is
+        output.append(part)
+
+    return ''.join(output)
+
+
+def parse_html(filename, charset=None, log=None):
+    if not parsers:
+        return None
+
+    if charset:
+        try:
+            codecs.lookup(charset) # In case of unknown charset...
+        except (ValueError, LookupError):
+            charset = None # ...try charset from HTML
+
+    charsets = [universal_charset, DEFAULT_CHARSET]
+    if charset:
+        charset = charset.lower().replace("windows-", "cp")
+        if charset in charsets:
+            charsets.remove(charset)
+        charsets.insert(0, charset)
+
+    for p in parsers:
+        parser = None
+        for c in charsets:
+            try:
+                parser = p(filename, c, log)
+            except UnicodeEncodeError:
+                pass
+            else:
+                break
+        if parser:
+            break
+        else:
+            if log: log("Parser %s.%s failed, trying next one." % (p.__module__, p.__name__))
+
+    if not parser:
+        if log: log("All parser has failed.")
+        return None
+
+    if log: log("   Using %s" % p.__module__)
+
+    converted_title = title = parser.title
+    if title and (not parser.charset):
+        try:
+            unicode(title, "ascii")
+        except UnicodeDecodeError:
+            parser.charset = DEFAULT_CHARSET
+
+    if parser.charset:
+        parser.charset = parser.charset.lower().replace("windows-", "cp")
+
+    if title and parser.charset and (
+          (parser.charset <> universal_charset) or
+          ((not charset) or (charset <> parser.charset))):
+        try:
+            if parser.meta_charset:
+                if log: log("   META charset   : %s" % parser.charset)
+            elif (not charset) or (charset <> parser.charset):
+                if log: log("   guessed charset: %s" % parser.charset)
+            #if log: log("   current charset: %s" % universal_charset)
+            if log: log("   title          : %s" % title)
+            if parser.charset <> universal_charset:
+                try:
+                    converted_title = unicode(title, parser.charset).encode(universal_charset)
+                except UnicodeError:
+                    if log: log("   incorrect conversion from %s, converting from %s" % (parser.charset, DEFAULT_CHARSET))
+                    converted_title = unicode(title, DEFAULT_CHARSET, "replace").encode(universal_charset, "replace")
+                    parser.charset = DEFAULT_CHARSET
+            if log and (converted_title <> title): log("   converted title: %s" % converted_title)
+        except LookupError:
+            if log: log("   unknown charset: '%s'" % parser.charset)
+    else:
+        if log: log("   title          : %s" % title)
+
+    if title:
+        final_title = recode_entities(converted_title, universal_charset)
+        parts = [s.strip() for s in final_title.replace('\r', '').split('\n')]
+        final_title = ' '.join([s for s in parts if s])
+        if log and (final_title <> converted_title): log("   final title    : %s" % final_title)
+        parser.title = final_title
+
+    icon = parser.icon
+    if isinstance(icon, unicode):
+        try:
+            parser.icon = icon.encode('ascii')
+        except UnicodeEncodeError:
+            if parser.charset:
+                parser.icon = icon.encode(parser.charset)
+    return parser
diff --git a/parse_html/beautifulsoup.py b/parse_html/bkmk_ph_beautifulsoup.py
similarity index 99%
rename from parse_html/beautifulsoup.py
rename to parse_html/bkmk_ph_beautifulsoup.py
index 1b2d3a4..e02081b 100644
--- a/parse_html/beautifulsoup.py
+++ b/parse_html/bkmk_ph_beautifulsoup.py
@@ -16,7 +16,7 @@ __all__ = ['parse_html']
 import re
 from sgmllib import SGMLParser, SGMLParseError
 from BeautifulSoup import BeautifulSoup, CData
-from .util import HTMLParser
+from .bkmk_ph_util import HTMLParser
 
 
 # http://groups.google.com/group/beautifulsoup/browse_thread/thread/69093cb0d3a3cf63
diff --git a/parse_html/etreetidy.py b/parse_html/bkmk_ph_etreetidy.py
similarity index 98%
rename from parse_html/etreetidy.py
rename to parse_html/bkmk_ph_etreetidy.py
index 0b36ecd..893229d 100644
--- a/parse_html/etreetidy.py
+++ b/parse_html/bkmk_ph_etreetidy.py
@@ -14,7 +14,7 @@ __all__ = ['parse_html']
 
 
 from elementtidy import TidyHTMLTreeBuilder
-from .util import HTMLParser
+from .bkmk_ph_util import HTMLParser
 
 
 def parse_html(filename, charset=None, log=None):
diff --git a/parse_html/html5.py b/parse_html/bkmk_ph_html5.py
similarity index 98%
rename from parse_html/html5.py
rename to parse_html/bkmk_ph_html5.py
index a6443f1..d883bd7 100644
--- a/parse_html/html5.py
+++ b/parse_html/bkmk_ph_html5.py
@@ -14,7 +14,7 @@ __all__ = ['parse_html']
 
 
 from html5lib import HTMLParser as HTML5Parser
-from .util import HTMLParser
+from .bkmk_ph_util import HTMLParser
 
 
 def parse_html(filename, charset=None, log=None):
diff --git a/parse_html/htmlparser.py b/parse_html/bkmk_ph_htmlparser.py
similarity index 100%
rename from parse_html/htmlparser.py
rename to parse_html/bkmk_ph_htmlparser.py
diff --git a/parse_html/lxml.py b/parse_html/bkmk_ph_lxml.py
similarity index 97%
rename from parse_html/lxml.py
rename to parse_html/bkmk_ph_lxml.py
index 01dd759..79cbb01 100644
--- a/parse_html/lxml.py
+++ b/parse_html/bkmk_ph_lxml.py
@@ -14,7 +14,7 @@ __all__ = ['parse_html']
 
 
 from lxml.html import parse
-from .util import HTMLParser
+from .bkmk_ph_util import HTMLParser
 
 
 def parse_html(filename, charset=None, log=None):
diff --git a/parse_html/util.py b/parse_html/bkmk_ph_util.py
similarity index 100%
rename from parse_html/util.py
rename to parse_html/bkmk_ph_util.py
-- 
2.39.5
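
Note on the rename: under Python 2's implicit relative imports the old module names could shadow the third-party packages they wrap (most obviously parse_html/lxml.py vs. the installed lxml package), which is presumably the "name clashes" the subject line refers to; the bkmk_ph_ prefix removes that ambiguity. The public entry point is unchanged, because parse_html/__init__.py now re-exports it from bkmk_parse_html. Below is a minimal usage sketch, not part of the patch; it is Python 2 like the rest of the codebase, and the file name "bookmarks.html" is hypothetical.

    import sys
    from parse_html import parse_html, universal_charset

    # parse_html() tries each available backend parser in turn and returns
    # None if none of them could handle the file.
    parser = parse_html("bookmarks.html", universal_charset,
                        log=lambda s: sys.stdout.write(s + '\n'))
    if parser is not None:
        # Attributes mirror what main() prints: refresh, icon and the title,
        # which the dispatcher has already recoded to utf-8.
        print " refresh:", parser.refresh
        print " icon   :", parser.icon
        print " title  :", parser.title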