"""HTML Parser using BeautifulSoup
This file is a part of Bookmarks database and Internet robot.
+
"""
__author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2007-2012 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2007-2023 PhiloSoft Design"
__license__ = "GNU GPL"
__all__ = ['parse_html']
import re
from sgmllib import SGMLParser, SGMLParseError
from BeautifulSoup import BeautifulSoup, CData
+
from .bkmk_ph_util import HTMLParser
+from compat import string_type
-DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
+DEFAULT_CHARSET = "cp1251" # Stupid default for Russian Cyrillic
# http://groups.google.com/group/beautifulsoup/browse_thread/thread/69093cb0d3a3cf63
+
+
class BadDeclParser(BeautifulSoup):
    """BeautifulSoup subclass that survives malformed SGML declarations."""

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data; treat a CDATA
        declaration as a CData object.  Return the index just past the
        consumed declaration."""
        if self.rawdata[i:i+9] == '<![CDATA[':
            close = self.rawdata.find(']]>', i)
            if close == -1:
                close = len(self.rawdata)
            self._toStringSubclass(self.rawdata[i+9:close], CData)
            return close + 3
        try:
            return SGMLParser.parse_declaration(self, i)
        except SGMLParseError:
            # The DOCTYPE could not be parsed -- skip past the whole
            # declaration and hand the skipped text over as plain data.
            match = re.search(r'<!DOCTYPE([^>]*?)>', self.rawdata[i:],
                              re.MULTILINE | re.IGNORECASE)
            if match:
                skipped = self.rawdata[i:match.end()]
            else:
                skipped = self.rawdata[i:]
            self.handle_data(skipped)
            return i + len(skipped)
+
+
def _parse_html(html_text, charset):
    """Parse *html_text* with the tolerant parser.

    Returns the parse tree, or None when BeautifulSoup chokes on the
    input with a TypeError.
    """
    try:
        tree = BadDeclParser(html_text, fromEncoding=charset)
    except TypeError:
        tree = None
    return tree
+
+
def parse_html(html_text, charset=None, log=None):
    """Parse an HTML page and extract its title, refresh URL and icon.

    Returns HTMLParser(_charset, meta_charset, title, refresh, icon),
    or None when the text is empty, unparseable, or yields none of
    title/refresh/icon.  ``log`` is accepted for interface
    compatibility with the other bkmk parsers and is not used here.
    """
    if not html_text:
        return None
    root = _parse_html(html_text, charset)
    if root is None:
        return None

    _charset = root.originalEncoding
    if _charset in ("ISO-8859-2", "windows-1252", "MacCyrillic"):
        # BeautifulSoup's guess is likely wrong for Russian Cyrillic
        # pages -- replace it with the default and re-parse.
        _charset = DEFAULT_CHARSET
        root = _parse_html(html_text, _charset)
        if root is None:
            return None

    html = root.html
    if html is None:
        html = root

    head = html.head
    if head is None:
        head = html  # Some sites put TITLE in HTML without HEAD

    title = head.title
    if (title is None) and (html is not head):
        # Some sites put TITLE in HTML outside of HEAD
        title = html.title

    if title is None:
        # Lookup TITLE in the root
        title = root.title

    if title is not None:
        if title.string:
            title = title.string
        else:
            # TITLE has mixed content -- flatten it to plain text.
            parts = []
            for part in title:
                if not isinstance(part, string_type):
                    part = part.decode()
                parts.append(part.strip())
            title = ''.join(parts)

    meta = head.find(_find_contenttype, recursive=False)
    meta_charset = False
    if meta:
        meta_content = meta.get("content")
        if meta_content:
            # Keep the try minimal: only the split chain can raise
            # IndexError (no "charset=" in the Content-Type value).
            try:
                __charset = meta_content.lower().split(
                    'charset=')[1].split(';')[0]
            except IndexError:
                pass
            else:
                meta_charset = _charset == __charset

    if not meta_charset:
        # HTML5-style <meta charset="..."> as a fallback.
        meta = head.find(_find_charset, recursive=False)
        if meta:
            meta_content = meta.get("charset")
            if meta_content:
                meta_charset = _charset = meta_content.lower()

    meta = head.find(_find_refresh, recursive=False)
    if meta:
        refresh = meta.get("content")
    else:
        refresh = None

    meta = head.find(_find_icon, recursive=False)
    if meta:
        icon = meta.get("href")
    else:
        icon = None

    if (title is None) and (refresh is None) and (icon is None):
        return None
    return HTMLParser(_charset, meta_charset, title, refresh, icon)
+
def _find_contenttype(Tag):
- return (Tag.name == "meta") and \
- (Tag.get("http-equiv", '').lower() == "content-type")
+ return (Tag.name == "meta") and \
+ (Tag.get("http-equiv", '').lower() == "content-type")
+
+
+def _find_charset(Tag):
+ return (Tag.name == "meta") and Tag.get("charset", '')
+
def _find_refresh(Tag):
- return (Tag.name == "meta") and \
- (Tag.get("http-equiv", '').lower() == "refresh")
+ return (Tag.name == "meta") and \
+ (Tag.get("http-equiv", '').lower() == "refresh")
+
def _find_icon(Tag):
- return (Tag.name == "link") and \
- (Tag.get("rel", '').lower() in ('icon', 'shortcut icon'))
+ return (Tag.name == "link") and \
+ (Tag.get("rel", '').lower() in ('icon', 'shortcut icon'))