X-Git-Url: https://git.phdru.name/?a=blobdiff_plain;f=parse_html%2Fbkmk_ph_beautifulsoup4.py;h=10e06a97cd4fc4e25689cd3a92040878a5f2775a;hb=HEAD;hp=aad3f8fb9092bdba2daec856a93f4309f9714795;hpb=11632b7623b2b2e01995f013bc6d8ba01c20cf74;p=bookmarks_db.git

diff --git a/parse_html/bkmk_ph_beautifulsoup4.py b/parse_html/bkmk_ph_beautifulsoup4.py
index aad3f8f..060f078 100644
--- a/parse_html/bkmk_ph_beautifulsoup4.py
+++ b/parse_html/bkmk_ph_beautifulsoup4.py
@@ -5,25 +5,42 @@ This file is a part of Bookmarks database and Internet robot.
 """
 
 __author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2017 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2017-2023 PhiloSoft Design"
 __license__ = "GNU GPL"
 __all__ = ['parse_html']
 
-import re
+import warnings
+
 from bs4 import BeautifulSoup
+
 from .bkmk_ph_util import HTMLParser
+from compat import string_type
+
+warnings.filterwarnings(
+    'ignore', 'No parser was explicitly specified')
+warnings.filterwarnings(
+    'ignore',
+    "It looks like you're parsing an XML document using an HTML parser.")
+
+universal_charset = "utf-8"
+DEFAULT_CHARSET = "cp1251"  # Stupid default for Russian Cyrillic
 
-DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
 
 def _parse_html(html_text, charset):
     try:
-        return BeautifulSoup(html_text, from_encoding=charset)
+        if isinstance(html_text, bytes):
+            return BeautifulSoup(html_text, from_encoding=charset)
+        else:
+            return BeautifulSoup(html_text)
     except TypeError:
         return None
 
+
 def parse_html(html_text, charset=None, log=None):
+    if not html_text:
+        return None
     root = _parse_html(html_text, charset)
     if root is None:
         return None
@@ -35,7 +52,7 @@ def parse_html(html_text, charset=None, log=None):
 
     head = html.head
     if head is None:
-        head = html # Some sites put TITLE in HTML without HEAD
+        head = html  # Some sites put TITLE in HTML without HEAD
 
     title = head.title
     if (title is None) and (html is not head):
@@ -52,9 +69,12 @@ def parse_html(html_text, charset=None, log=None):
     else:
         parts = []
         for part in title:
-            if not isinstance(part, basestring):
-                part = unicode(part)
-            parts.append(part.strip())
+            #if not isinstance(part, string_type):
+            #    part = part.decode()
+            if part.strip:
+                parts.append(part.strip())
+            else:
+                parts.append(' ')  # Skip tags, they're usually <BR>
         title = ''.join(parts)
 
     meta = head.find(_find_contenttype, recursive=False)
@@ -62,10 +82,11 @@ def parse_html(html_text, charset=None, log=None):
         try:
             meta_content = meta.get("content")
             if meta_content:
-                __charset = meta_content.lower().split('charset=')[1].split(';')[0]
+                __charset = meta_content.lower().split('charset=')[1].\
+                    split(';')[0]
             else:
                 __charset = False
-        except IndexError: # No charset in the META Content-Type
+        except IndexError:  # No charset in the META Content-Type
             meta_charset = False
         else:
             meta_charset = _charset = __charset
@@ -79,8 +100,12 @@ def parse_html(html_text, charset=None, log=None):
         if meta_content:
             meta_charset = _charset = meta_content.lower()
 
-    if title and (_charset or meta_charset):
-        title = title.encode(_charset or meta_charset)
+    #if title and (_charset or meta_charset):
+    #    try:
+    #        title = title.encode(_charset or meta_charset)
+    #    except LookupError:
+    #        title = title.encode(universal_charset)
+    #        _charset = universal_charset
 
     meta = head.find(_find_refresh, recursive=False)
     if meta:
@@ -98,17 +123,23 @@ def parse_html(html_text, charset=None, log=None):
         return None
     return HTMLParser(_charset, meta_charset, title, refresh, icon)
 
+
 def _find_contenttype(Tag):
     return (Tag.name == "meta") and \
         (Tag.get_attribute_list("http-equiv", '')[0].lower() == "content-type")
 
+
 def _find_charset(Tag):
     return (Tag.name == "meta") and Tag.get("charset", '')
 
+
 def _find_refresh(Tag):
     return (Tag.name == "meta") and \
         (Tag.get_attribute_list("http-equiv", '')[0].lower() == "refresh")
 
+
 def _find_icon(Tag):
-    return (Tag.name == "link") and \
-        (Tag.get_attribute_list("rel", '')[0].lower() in ('icon', 'shortcut icon'))
+    if Tag.name != "link":
+        return False
+    rel = ' '.join(Tag.get_attribute_list("rel", ''))
+    return rel in ('icon', 'shortcut icon')
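
For reference, the standalone sketch below walks through the same in-document charset-detection order the patched parse_html() follows: a <meta http-equiv="Content-Type" content="...; charset=..."> declaration is consulted first, and the HTML5 <meta charset="..."> form only if that yields nothing. The helper name detect_charset and the choice of the html.parser backend are illustrative assumptions, not code from this repository; only BeautifulSoup 4 is required.

from bs4 import BeautifulSoup


def detect_charset(html_text):
    """Return the charset declared inside the document, or None."""
    soup = BeautifulSoup(html_text, "html.parser")
    # Some pages put META tags straight under <html> without a <head>.
    head = soup.head if soup.head is not None else soup
    # 1. <meta http-equiv="Content-Type" content="text/html; charset=...">
    for meta in head.find_all("meta"):
        if meta.get("http-equiv", "").lower() == "content-type":
            content = meta.get("content", "").lower()
            if "charset=" in content:
                return content.split("charset=")[1].split(";")[0].strip()
    # 2. HTML5 form: <meta charset="...">
    for meta in head.find_all("meta"):
        if meta.get("charset"):
            return meta["charset"].lower()
    return None


if __name__ == "__main__":
    print(detect_charset(
        '<html><head><meta http-equiv="Content-Type"'
        ' content="text/html; charset=koi8-r"></head></html>'))  # koi8-r

The patch's own predicates (_find_contenttype and _find_charset) perform the same two checks via head.find(..., recursive=False); the sketch inlines them for brevity and searches the whole head rather than only its direct children.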
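
A hedged usage sketch of the patched entry point follows. The import path, the example file name, and the attribute names on the returned object (title, meta_charset, refresh, icon) are assumptions read off the HTMLParser(_charset, meta_charset, title, refresh, icon) call above, not verified against bkmk_ph_util; treat them as illustrative.

from parse_html.bkmk_ph_beautifulsoup4 import parse_html

# Bytes input: the patched _parse_html() passes the caller-supplied charset
# to BeautifulSoup via from_encoding; str input is parsed as-is.
with open("page.html", "rb") as f:
    parser = parse_html(f.read(), charset="utf-8")

if parser is not None:          # None for empty or unparseable input
    print(parser.title)         # <title> text with nested tags skipped
    print(parser.meta_charset)  # charset declared inside the document, if any
    print(parser.refresh)       # <meta http-equiv="refresh"> value, if present
    print(parser.icon)          # from <link rel="icon"> / "shortcut icon"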