X-Git-Url: https://git.phdru.name/?p=phdru.name%2Fphdru.name.git;a=blobdiff_plain;f=phd.py;h=a68db56e5cb2b4f601bdc84f398ab7ecd93c4cd3;hp=3e275e6f21c8a3fe5490a032cf5208b8e1abf7e8;hb=HEAD;hpb=7a19a23b5facfac2066cebaf7b34c270b14d2dbe

diff --git a/phd.py b/phd.py
index 3e275e6..4aa206c 100644
--- a/phd.py
+++ b/phd.py
@@ -1,127 +1,178 @@
-import os, re, time, urllib
+import os, re, time
+try:
+    from urllib import quote as url_quote
+except ImportError:
+    from urllib.parse import quote as url_quote
 from Cheetah.Template import Template
+from Cheetah.compat import PY2
+from blog_db import load_blog
 
 url_re = r"(((https?|ftp|gopher|telnet)://|(mailto|file|news|about|ed2k|irc|sip|magnet):)[^' \t<>\"]+|(www|web|w3)[A-Za-z0-9_-]*\.[A-Za-z0-9._-]+\.[^' \t<>\"]+)[A-Za-z0-9/]"
 
+
 def _url2href(match):
-    url = match.group(0)
-    return '<a href="%s">%s</a>' % (url, url)
+    url = match.group(0)
+    return '<a href="%s">%s</a>' % (url, url)
 
 full_dirs = len(os.getcwd().split('/')) + 1
 
+
 class phd(Template):
-    def __init__(self, *args, **kw):
-        if not hasattr(self, "_fileBaseName"):
-            self._fileDirName, self._fileBaseName = os.path.split(os.path.abspath(self._CHEETAH_src))
-        Template.__init__(self, *args, **kw)
-        directories = self._fileDirName.split('/')[full_dirs:] # remove directories up to "./files"
-        dirs_to_root = len(directories)
-        if dirs_to_root:
-            root = "../"*dirs_to_root
-        else:
-            root = ''
-        self.root = root
-        path = '/'.join(directories) + '/' + \
-            self._fileBaseName.replace(".tmpl", ".html")
-        if path[0] <> '/': path = '/' + path
-        self.path = path
-
-    def copyright(self, start_year):
-        this_year = time.localtime()[0]
-        if start_year >= this_year:
-            return this_year
-        if start_year == this_year - 1:
-            return "%s, %s" % (start_year, this_year)
-        return "%s-%s" % (start_year, this_year)
-
-
-    def body(self):
-        if hasattr(self, "body_html"):
-            return self.body_html().encode('utf-8')
-        if hasattr(self, "body_text"):
-            return self.text2html()
-        if hasattr(self, "body_rst"):
-            return self.rst2html()
-        if hasattr(self, "body_mkd"):
-            return self.mkd2html()
-
-    def text2html(self):
-        body = re.sub(url_re, _url2href, self.body_text())
-
-        paragraphs = body.split("\n\n")
-
-        new_paras = []
-        for p in paragraphs:
-            if isinstance(p, unicode):
-                p = p.encode('utf-8')
-            parts = p.split("\n ")
-            parts[0] = parts[0].strip()
-            new_paras.append('\n<br>\n<br>\n'.join(parts))
-
-        if self.Title:
-            title = "<h1>%s</h1>\n\n" % self.Title
-        else:
-            title = ''
-
-        body = '\n</p>\n\n<p>\n'.join(new_paras)
-        return "%s<p>%s</p>" % (title, body)
-
-    def rst2html(self):
-        from docutils.core import publish_parts
-
-        parts = publish_parts(self.body_rst(), writer_name="html")
-
-        title = parts["title"] or self.Title
-        if title:
-            title = "<h1>%s</h1>" % title
-
-        subtitle = parts["subtitle"]
-        if subtitle:
-            subtitle = "<h2>%s</h2>" % subtitle
-
-        body = parts["body"]
-        parts = []
-        for part in (title, subtitle, body):
-            if not part:
-                continue
-            if isinstance(part, unicode):
-                part = part.encode('utf-8')
-            parts.append(part)
-        return "\n\n".join(parts)
-
-    def mkd2html(self):
-        from markdown import markdown
-        return markdown(self.body_mkd(), output_format="html")
-
-    def img_thumbnail_800_1024(self, img_name):
-        return """\
+    def __init__(self, *args, **kw):
+        if not hasattr(self, "_fileBaseName"):
+            self._fileDirName, self._fileBaseName = \
+                os.path.split(os.path.abspath(self._CHEETAH_src))
+        Template.__init__(self, *args, **kw)
+        # remove directories up to "./files"
+        directories = self._fileDirName.split('/')[full_dirs:]
+        dirs_to_root = len(directories)
+        if dirs_to_root:
+            root = "../"*dirs_to_root
+        else:
+            root = ''
+        self.root = root
+        path = '/'.join(directories) + '/' + \
+            self._fileBaseName.replace(".tmpl", ".html")
+        if path[0] != '/': path = '/' + path
+        self.path = path
+
+    def copyright(self, start_year):
+        this_year = time.localtime()[0]
+        if start_year >= this_year:
+            return this_year
+        if start_year == this_year - 1:
+            return "%s, %s" % (start_year, this_year)
+        return "%s-%s" % (start_year, this_year)
+
+    def body(self):
+        if hasattr(self, "body_html"):
+            return self.body_html()
+        if hasattr(self, "body_text"):
+            return self.text2html()
+        if hasattr(self, "body_rst"):
+            return self.rst2html()
+        if hasattr(self, "body_mkd"):
+            return self.mkd2html()
+
+    def text2html(self):
+        body = re.sub(url_re, _url2href, self.body_text())
+
+        paragraphs = body.split("\n\n")
+
+        new_paras = []
+        for p in paragraphs:
+            if PY2 and not isinstance(p, bytes):
+                p = p.encode('utf-8')
+            parts = p.split("\n ")
+            parts[0] = parts[0].strip()
+            new_paras.append('\n<br>\n<br>\n'.join(parts))
+
+        if self.Title:
+            title = "<h1>%s</h1>\n\n" % self.Title
+        else:
+            title = ''
+
+        body = '\n</p>\n\n<p>\n'.join(new_paras)
+        return "%s<p>%s</p>" % (title, body)
+
+    def rst2html(self):
+        from docutils.core import publish_parts
+
+        parts = publish_parts(self.body_rst(), writer_name="html")
+
+        title = parts["title"] or self.Title
+        if title:
+            title = "<h1>%s</h1>" % title
+
+        subtitle = parts["subtitle"]
+        if subtitle:
+            subtitle = "<h2>%s</h2>" % subtitle
+
+        body = parts["body"]
+        parts = []
+        for part in (title, subtitle, body):
+            if not part:
+                continue
+            if PY2 and not isinstance(part, bytes):
+                part = part.encode('utf-8')
+            parts.append(part)
+        return "\n\n".join(parts)
+
+    def mkd2html(self):
+        from markdown import markdown
+        return markdown(self.body_mkd(), output_format="html")
+
+    def find_near_blog_posts(self):
+        if not self.path.startswith("/Russian/blog/"):
+            return None, None, None, None
+        dirs = self.path.split('/')
+        # blog post is ['', 'Russian', 'blog', year, month, day, filename]
+        if len(dirs) != 7: # Not a blog post
+            return None, None, None, None
+        ymd = tuple(dirs[3:6])
+        filename = self._fileBaseName
+
+        prev_key = prev_blog_post = current_key = current_blog_post = \
+            next_key = next_blog_post = None
+        blog = load_blog()
+        for key in sorted(blog):
+            for blog_post in sorted(blog[key]):
+                if current_blog_post:
+                    prev_key = current_key
+                    prev_blog_post = current_blog_post
+                if next_blog_post:
+                    current_key = next_key
+                    current_blog_post = next_blog_post
+                next_key = key
+                next_blog_post = blog_post
+                if current_blog_post and (current_key == ymd) and \
+                        (current_blog_post[0] == filename): # Found!
+                    return prev_key, prev_blog_post, next_key, next_blog_post
+        return current_key, current_blog_post, None, None
+
+    def img_thumbnail_800_1024(self, img_name):
+        return """\
 %(img_name)s-thumbnail.jpg
800x600, 1024x800""" % {"img_name": img_name} - def wikipedia(self, query): - return "https://en.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',') + def wikipedia(self, query): + return "https://en.wikipedia.org/wiki/%s" % quote_string( + query.replace(' ', '_'), ext_safe=',') + + def wikipedia_ru(self, query): + return "https://ru.wikipedia.org/wiki/%s" % quote_string( + query.replace(' ', '_'), ext_safe=',') + + def startpage(self, query): + return "https://startpage.com/do/search?q=%s" % quote_string(query) - def wikipedia_ru(self, query): - return "https://ru.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',') + search = startpage - def startpage(self, query): - return "https://startpage.com/do/search?q=%s" % quote_string(query) + def nigma(self, query): + return "http://www.nigma.ru/index.php?s=%s" % quote_string(query) - search = startpage + def yandex(self, query): + return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" \ + % quote_string(query, "cp1251") - def nigma(self, query): - return "http://www.nigma.ru/index.php?s=%s" % quote_string(query) + def google(self, query): + return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" \ + % quote_string(query) - def yandex(self, query): - return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" % quote_string(query, "cp1251") + def google_ru(self, query): + return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" \ + % quote_string(query) - def google(self, query): - return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" % quote_string(query) + def lurkmoar(self, query): + return "http://lurklurk.com/%s" % quote_string( + query.replace(' ', '_'), ext_safe=',') - def google_ru(self, query): - return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" % quote_string(query) def quote_string(s, to_encoding="utf-8", ext_safe=''): - return urllib.quote(unicode(s, "utf-8").encode(to_encoding), '/' + ext_safe) + if isinstance(s, bytes): + s = s.decode("utf-8") + if PY2: + s = s.encode(to_encoding) + return url_quote(s, '/' + ext_safe)