X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=parse_html%2Fbkmk_ph_beautifulsoup4.py;h=1095ebce8d510c22df2c9768baed3c27777e1909;hb=dd8d2bd3a0f9bf4e09f58d7289437cf43350f373;hp=10e06a97cd4fc4e25689cd3a92040878a5f2775a;hpb=be7cec61fa405f38dea2edde623174ca47ca7dc3;p=bookmarks_db.git

diff --git a/parse_html/bkmk_ph_beautifulsoup4.py b/parse_html/bkmk_ph_beautifulsoup4.py
index 10e06a9..1095ebc 100644
--- a/parse_html/bkmk_ph_beautifulsoup4.py
+++ b/parse_html/bkmk_ph_beautifulsoup4.py
@@ -5,18 +5,20 @@ This file is a part of Bookmarks database and Internet robot.
 
 """
 
 __author__ = "Oleg Broytman "
-__copyright__ = "Copyright (C) 2017 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2017-2023 PhiloSoft Design"
 __license__ = "GNU GPL"
 __all__ = ['parse_html']
 
 
-import re
 from bs4 import BeautifulSoup
+
 from .bkmk_ph_util import HTMLParser
+from compat import string_type
 
 universal_charset = "utf-8"
-DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
+DEFAULT_CHARSET = "cp1251"  # Stupid default for Russian Cyrillic
+
 
 def _parse_html(html_text, charset):
     try:
@@ -24,6 +26,7 @@ def _parse_html(html_text, charset):
     except TypeError:
         return None
 
+
 def parse_html(html_text, charset=None, log=None):
     root = _parse_html(html_text, charset)
     if root is None:
@@ -36,7 +39,7 @@ def parse_html(html_text, charset=None, log=None):
 
     head = html.head
     if head is None:
-        head = html # Some sites put TITLE in HTML without HEAD
+        head = html  # Some sites put TITLE in HTML without HEAD
 
     title = head.title
     if (title is None) and (html is not head):
@@ -53,8 +56,8 @@ def parse_html(html_text, charset=None, log=None):
     else:
         parts = []
         for part in title:
-            if not isinstance(part, basestring):
-                part = unicode(part)
+            if not isinstance(part, string_type):
+                part = part.decode()
             parts.append(part.strip())
         title = ''.join(parts)
 
@@ -63,10 +66,11 @@ def parse_html(html_text, charset=None, log=None):
         try:
             meta_content = meta.get("content")
             if meta_content:
-                __charset = meta_content.lower().split('charset=')[1].split(';')[0]
+                __charset = meta_content.lower().split('charset=')[1].\
+                    split(';')[0]
             else:
                 __charset = False
-        except IndexError: # No charset in the META Content-Type
+        except IndexError:  # No charset in the META Content-Type
             meta_charset = False
         else:
             meta_charset = _charset = __charset
@@ -80,12 +84,12 @@ def parse_html(html_text, charset=None, log=None):
         if meta_content:
             meta_charset = _charset = meta_content.lower()
 
-    if title and (_charset or meta_charset):
-        try:
-            title = title.encode(_charset or meta_charset)
-        except LookupError:
-            title = title.encode(universal_charset)
-            _charset = universal_charset
+    #if title and (_charset or meta_charset):
+    #    try:
+    #        title = title.encode(_charset or meta_charset)
+    #    except LookupError:
+    #        title = title.encode(universal_charset)
+    #        _charset = universal_charset
 
     meta = head.find(_find_refresh, recursive=False)
     if meta:
@@ -103,17 +107,22 @@ def parse_html(html_text, charset=None, log=None):
         return None
     return HTMLParser(_charset, meta_charset, title, refresh, icon)
 
+
 def _find_contenttype(Tag):
     return (Tag.name == "meta") and \
        (Tag.get_attribute_list("http-equiv", '')[0].lower() == "content-type")
 
+
 def _find_charset(Tag):
     return (Tag.name == "meta") and Tag.get("charset", '')
 
+
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \
        (Tag.get_attribute_list("http-equiv", '')[0].lower() == "refresh")
 
+
 def _find_icon(Tag):
     return (Tag.name == "link") and \
-       (Tag.get_attribute_list("rel", '')[0].lower() in ('icon', 'shortcut icon'))
+       (Tag.get_attribute_list("rel", '')[0].lower()
+        in ('icon', 'shortcut icon'))
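
Note on the string handling changes above: the diff swaps the Python 2 builtins basestring/unicode for string_type imported from the project's compat module, which is not shown in this diff. A minimal sketch of what such a 2/3 compatibility shim commonly looks like follows; the names PY2 and the exact definitions here are assumptions for illustration, not taken from the repository's compat.py.

    # Hypothetical compat shim (sketch only; the real compat.py may differ).
    import sys

    PY2 = sys.version_info[0] == 2

    if PY2:
        string_type = basestring  # noqa: F821 -- Python 2 builtin
    else:
        string_type = str

With a shim like this, isinstance(part, string_type) accepts both byte and text strings on Python 2 and plain str on Python 3, which is why the loop in parse_html() only calls part.decode() on non-string parts.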