Adapted to Python3 and Cheetah3
author    Oleg Broytman <phd@phdru.name>
          Sun, 26 Mar 2017 23:23:43 +0000 (02:23 +0300)
committer Oleg Broytman <phd@phdru.name>
          Mon, 10 Apr 2017 03:26:36 +0000 (06:26 +0300)
Makefile
make-files
make-news.py
news.py
phd.py
phd_site.tmpl
reindex_blog.py

diff --git a/Makefile b/Makefile
index 7e582419ca5f8ba025fe8d592233add7ace32b54..7e55241f5af99b543138fed26daef4b64ecb7542 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@
 # This file is a part of phdru.name homepage/blog/news generator scripts.
 
 # __author__ = "Oleg Broytman <phd@phdru.name>"
-# __copyright__ = "Copyright (C) 2006-2016 PhiloSoft Design"
+# __copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
 
 
 .SUFFIXES: # Clear the suffix list
 
 
 %.py: %.tmpl
-       umask 022; cheetah compile --nobackup $< && compyle $@
+       umask 022; cheetah compile --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup $< && compyle $@
 
 %.html: %.tmpl
-       umask 022; PYTHONPATH=. cheetah fill --nobackup --stdout $< | iconv -f utf-8 > $@
+       umask 022; PYTHONPATH=. PYTHONIOENCODING=koi8-r:replace cheetah fill --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup --stdout $< | iconv -f utf-8 -t koi8-r >$@
 
 
 .PHONY: fast
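The fill rule now pins the character set at three points: Cheetah's own --encoding/--settings options, the Python I/O layer via PYTHONIOENCODING, and a final iconv pass to koi8-r. As a rough illustration (not part of the commit) of what PYTHONIOENCODING=koi8-r:replace does to a Python process:

    # Run as: PYTHONIOENCODING=koi8-r:replace python example.py
    # (hypothetical file name; works on Python 2.6+ and Python 3)
    import sys

    # The environment variable makes the interpreter open sys.stdout with
    # the koi8-r codec and the 'replace' error handler, so characters that
    # koi8-r cannot represent become '?' instead of raising
    # UnicodeEncodeError.
    print(sys.stdout.encoding)   # expected to report koi8-r
    print(u'\u20ac')             # the euro sign is not in koi8-r -> printed as '?'
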
diff --git a/make-files b/make-files
index ec02398709c5d36f2b2396aa53ccd5ea81745324..e5aa6d5d123e5ffbe3ec4860484ff86c8e771bcc 100755 (executable)
--- a/make-files
+++ b/make-files
@@ -2,15 +2,19 @@
 
 umask 022
 PYTHONPATH=.
-export PYTHONPATH
+PYTHONIOENCODING=koi8-r:replace
+export PYTHONPATH PYTHONIOENCODING
 
 if [ phd_site.tmpl -nt phd_site.py ]; then
-   cheetah compile --nobackup phd_site.tmpl && compyle phd_site.py
+   cheetah compile --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup phd_site.tmpl &&
+   compyle phd_site.py
 fi &&
 
 for tmpl in "$@"; do
    dir="`dirname \"$tmpl\"`"
    outfile="`basename \"$tmpl\" .tmpl`.html"
    echo "$tmpl => $dir/$outfile"
-   cheetah fill --nobackup --stdout "$tmpl" | iconv -f utf-8 >"$dir/$outfile" || exit 1
+   cheetah fill --encoding=koi8-r --settings='encoding="koi8-r"' \
+      --nobackup --stdout "$tmpl" | iconv -f utf-8 -t koi8-r >"$dir/$outfile" || exit 1
 done
+
diff --git a/make-news.py b/make-news.py
index f6929c78302ea54093506402c87e0b6bc5d589df..6d6ab7db65019cad6b08ad5de80989704958f8aa 100755 (executable)
--- a/make-news.py
+++ b/make-news.py
@@ -2,11 +2,14 @@
 # -*- coding: koi8-r -*-
 
 __author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2014 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
 
 import sys, os
 from news import get_news, write_if_changed
 
+from atom_10 import atom_10
+from rss_20 import rss_20
+
 lang = sys.argv[1]
 root = sys.argv[2]
 
@@ -19,7 +22,8 @@ new_text.append("""
 """)
 
 for item in news_items:
-   new_text.append('   <li><a href="%s">%s - %s</a></li>\n' % (item.rel_link, item.date, item.title))
+    new_text.append('   <li><a href="%s">%s - %s</a></li>\n'
+                    % (item.rel_link, item.date, item.title))
 
 new_text.append("""\
 </ul>
@@ -28,7 +32,7 @@ new_text.append("""\
 """)
 
 if lang == "en":
-   new_text.append("""\
+    new_text.append("""\
 News are also available in
 <A HREF="atom_10.xml">Atom 1.0 <img src="Graphics/atom_10.jpg" border=0></A>
 and <A HREF="rss_20.xml">RSS 2.0 <img src="Graphics/rss_20.jpg" border=0></A>
@@ -36,7 +40,7 @@ formats.
 """)
 
 elif lang == "ru":
-   new_text.append("""\
+    new_text.append("""\
 Новостевая лента в форматах
 <A HREF="atom_10.xml">Atom 1.0 <img src="../Graphics/atom_10.jpg" border=0></A>
 и <A HREF="rss_20.xml">RSS 2.0 <img src="../Graphics/rss_20.jpg" border=0></A>.
@@ -51,40 +55,48 @@ $phd_site.respond(self)
 write_if_changed(os.path.join(root, "news.tmpl"), ''.join(new_text))
 
 
-from atom_10 import atom_10
-from rss_20 import rss_20
-
 namespace = {
-   "title": "Oleg Broytman's Personal Page - News",
-   "baseURL": "http://phdru.name/",
-   "indexFile": "news.html",
-   "description": "",
-   "lang": lang,
-   "author": "Oleg Broytman",
-   "email": "phd@phdru.name",
-   "generator": os.path.basename(sys.argv[0]),
-   "posts": news_items,
+    "title": "Oleg Broytman's Personal Page - News",
+    "baseURL": "http://phdru.name/",
+    "indexFile": "news.html",
+    "description": "",
+    "lang": lang,
+    "author": "Oleg Broytman",
+    "email": "phd@phdru.name",
+    "generator": os.path.basename(sys.argv[0]),
+    "posts": news_items,
 }
 
 if lang == "ru":
-   namespace["title"] = "Oleg Broytman's Personal Page - Russian News"
-   namespace["baseURL"] = baseURL = "http://phdru.name/Russian/"
-   for item in news_items:
-      item.baseURL = baseURL
-      item.title = item.title.decode('koi8-r').encode('utf-8')
+    namespace["title"] = "Oleg Broytman's Personal Page - Russian News"
+    namespace["baseURL"] = baseURL = "http://phdru.name/Russian/"
+    for item in news_items:
+        item.baseURL = baseURL
+        if isinstance(item.title, bytes):
+            item.title = item.title.decode('koi8-r').encode('utf-8')
 
 for item in news_items:
-   href_parts = item.rel_link.split('/')
-   if href_parts:
-      if href_parts[0] == '.':
-         category = "Home page"
-      elif href_parts[0] == "..":
-         category = href_parts[1]
-      else:
-         category = href_parts[0]
-      if category: item.categoryList = [category]
-
-atom_tmpl = unicode(atom_10(searchList=[namespace])).encode('koi8-r')
+    href_parts = item.rel_link.split('/')
+    if href_parts:
+        if href_parts[0] == '.':
+            category = "Home page"
+        elif href_parts[0] == "..":
+            category = href_parts[1]
+        else:
+            category = href_parts[0]
+        if category: item.categoryList = [category]
+
+atom_tmpl = atom_10(searchList=[namespace])
+rss_tmpl = rss_20(searchList=[namespace])
+
+try:
+    unicode
+except NameError:  # PY3
+    atom_tmpl = str(atom_tmpl)
+    rss_tmpl = str(rss_tmpl)
+else:
+    atom_tmpl = unicode(atom_tmpl).encode('koi8-r')
+    rss_tmpl = unicode(rss_tmpl).encode('koi8-r')
+
 write_if_changed(os.path.join(root, "atom_10.xml"), atom_tmpl)
-rss_tmpl = unicode(rss_20(searchList=[namespace])).encode('koi8-r')
 write_if_changed(os.path.join(root, "rss_20.xml"), rss_tmpl)
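The feed generation above switches on the presence of the unicode builtin rather than on an explicit version check: if the name is gone we are on Python 3 and the rendered templates are already text, otherwise they are passed through unicode() and encoded to koi8-r. The same pattern in isolation (function and variable names here are illustrative, not from the commit):

    try:
        unicode            # defined on Python 2 only
    except NameError:      # Python 3
        def render(tmpl):
            return str(tmpl)                          # Cheetah output is already text
    else:
        def render(tmpl):
            return unicode(tmpl).encode('koi8-r')     # Python 2: text -> koi8-r bytes
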
diff --git a/news.py b/news.py
index 3e0385116228ed6b637c9c918c25066b15d7fde3..82c5c1006dfb291577ae7fe384bf4eedfd0a46dc 100644 (file)
--- a/news.py
+++ b/news.py
@@ -1,76 +1,88 @@
 """News"""
 
+from __future__ import print_function
+
 __author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2012 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
 __docformat__ = "epytext en"
 
 __all__ = ["get_news", "write_if_changed"]
 
 
 from datetime import date
-from urlparse import urljoin
+try:
+    from urlparse import urljoin
+except ImportError:
+    from urllib.parse import urljoin
+
+try:
+    unicode
+except NameError:  # PY3
+    std_open = open
+    def open(fname, mode):
+        return std_open(fname, mode, encoding='koi8-r')
 
 class NewsItem(object):
-   baseURL = "http://phdru.name/"
-   excerpt = None
-   content = None
-   categoryList = []
+    baseURL = "http://phdru.name/"
+    excerpt = None
+    content = None
+    categoryList = []
 
-   def __init__(self, date, title, rel_link):
-      self.date = date
-      self.title = title
-      self.rel_link = rel_link
+    def __init__(self, date, title, rel_link):
+        self.date = date
+        self.title = title
+        self.rel_link = rel_link
 
-   def URL(self):
-      return urljoin(self.baseURL, self.rel_link)
+    def URL(self):
+        return urljoin(self.baseURL, self.rel_link)
 
-   def rfc822_date(self):
-      y, m, d = self.date.split('-')
-      d = date(int(y), int(m), int(d))
-      return d.strftime("%a, %d %b %Y %H:%M:%S +0300")
+    def rfc822_date(self):
+        y, m, d = self.date.split('-')
+        d = date(int(y), int(m), int(d))
+        return d.strftime("%a, %d %b %Y %H:%M:%S +0300")
 
 
 def get_news_header(lang):
-   news_tmpl_file = open("news.tmpl-%s" % lang, 'r')
-   header = news_tmpl_file.read()
-   news_tmpl_file.close()
+    news_tmpl_file = open("news.tmpl-%s" % lang, 'r')
+    header = news_tmpl_file.read()
+    news_tmpl_file.close()
 
-   return header
+    return header
 
 def get_news_items(lang):
-   news_items_file = open("news_%s" % lang, 'r')
-   news_text = news_items_file.read()
-   news_items_file.close()
+    news_items_file = open("news_%s" % lang, 'r')
+    news_text = news_items_file.read()
+    news_items_file.close()
 
-   news_items = []
-   for line in news_text.split('\n'):
-      if not line: continue
-      date, rel_link, title = line.split(None, 2)
-      news_items.append(NewsItem(date, title, rel_link))
+    news_items = []
+    for line in news_text.split('\n'):
+        if not line: continue
+        date, rel_link, title = line.split(None, 2)
+        news_items.append(NewsItem(date, title, rel_link))
 
-   return news_items
+    return news_items
 
 def get_news(lang):
-   """Get news
+    """Get news
 
-   @param lang: langauge
-   @type lang: a string 'en' or 'ru'
-   @return: a tuple of (lnaguge-dependent header, a list of new items)
+    @param lang: language
+    @type lang: a string 'en' or 'ru'
+    @return: a tuple of (language-dependent header, a list of news items)
 
-   """
-   return get_news_header(lang), get_news_items(lang)
+    """
+    return get_news_header(lang), get_news_items(lang)
 
 
 def write_if_changed(filename, new_text):
-   try:
-      infile = open(filename, 'r')
-      old_text = infile.read()
-      infile.close()
-   except IOError:
-      old_text = None
-
-   if old_text <> new_text:
-      print "Writing", filename
-      outfile = open(filename, 'w')
-      outfile.write(new_text)
-      outfile.close()
+    try:
+        infile = open(filename, 'r')
+        old_text = infile.read()
+        infile.close()
+    except IOError:
+        old_text = None
+
+    if old_text != new_text:
+        print("Writing", filename)
+        outfile = open(filename, 'w')
+        outfile.write(new_text)
+        outfile.close()
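On Python 3 the module shadows the built-in open() so that every existing open() call in news.py transparently decodes and encodes koi8-r, while on Python 2 the calls keep returning byte strings as before. A cross-version alternative (a sketch of an equivalent approach, not what the commit uses) would be to route the calls through io.open, which accepts an encoding on both versions:

    import io

    def open_koi8(fname, mode='r'):
        # io.open returns text on Python 2 and Python 3 alike once an
        # encoding is given, so callers never see raw koi8-r bytes.
        return io.open(fname, mode, encoding='koi8-r')
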
diff --git a/phd.py b/phd.py
index 3c2e0dd038bb987cb6187748dc385fce67aeda07..4e13617540eeb09967a2aff01209cc2be9b9ae82 100644 (file)
--- a/phd.py
+++ b/phd.py
-import os, re, time, urllib
+import os, re, time
+try:
+    from urllib import quote as url_quote
+except ImportError:
+    from urllib.parse import quote as url_quote
 from Cheetah.Template import Template
+from Cheetah.compat import PY2
 
 
 url_re = r"(((https?|ftp|gopher|telnet)://|(mailto|file|news|about|ed2k|irc|sip|magnet):)[^' \t<>\"]+|(www|web|w3)[A-Za-z0-9_-]*\.[A-Za-z0-9._-]+\.[^' \t<>\"]+)[A-Za-z0-9/]"
 
 def _url2href(match):
-   url = match.group(0)
-   return '<a href="%s">%s</a>' % (url, url)
+    url = match.group(0)
+    return '<a href="%s">%s</a>' % (url, url)
 
 
 full_dirs = len(os.getcwd().split('/')) + 1
 
 class phd(Template):
-   def __init__(self, *args, **kw):
-      if not hasattr(self, "_fileBaseName"):
-         self._fileDirName, self._fileBaseName = os.path.split(os.path.abspath(self._CHEETAH_src))
-      Template.__init__(self, *args, **kw)
-      directories = self._fileDirName.split('/')[full_dirs:] # remove directories up to "./files"
-      dirs_to_root = len(directories)
-      if dirs_to_root:
-         root = "../"*dirs_to_root
-      else:
-         root = ''
-      self.root = root
-      path = '/'.join(directories) + '/' + \
-         self._fileBaseName.replace(".tmpl", ".html")
-      if path[0] <> '/': path = '/' + path
-      self.path = path
-
-   def copyright(self, start_year):
-      this_year = time.localtime()[0]
-      if start_year >= this_year:
-         return this_year
-      if start_year == this_year - 1:
-         return "%s, %s" % (start_year, this_year)
-      return "%s-%s" % (start_year, this_year)
-
-
-   def body(self):
-      if hasattr(self, "body_html"):
-         return self.body_html().encode('utf-8')
-      if hasattr(self, "body_text"):
-         return self.text2html()
-      if hasattr(self, "body_rst"):
-         return self.rst2html()
-      if hasattr(self, "body_mkd"):
-         return self.mkd2html()
-
-   def text2html(self):
-      body = re.sub(url_re, _url2href, self.body_text())
-
-      paragraphs = body.split("\n\n")
-
-      new_paras = []
-      for p in paragraphs:
-         if isinstance(p, unicode):
-            p = p.encode('utf-8')
-         parts = p.split("\n   ")
-         parts[0] = parts[0].strip()
-         new_paras.append('\n</p>\n<p>\n'.join(parts))
-
-      if self.Title:
-         title = "<h1>%s</h1>\n\n" % self.Title
-      else:
-         title = ''
-
-      body = '\n</p>\n\n<p class="head">\n'.join(new_paras)
-      return "%s<p>%s</p>" % (title, body)
-
-   def rst2html(self):
-      from docutils.core import publish_parts
-
-      parts = publish_parts(self.body_rst(), writer_name="html")
-
-      title = parts["title"] or self.Title
-      if title:
-         title = "<h1>%s</h1>" % title
-
-      subtitle = parts["subtitle"]
-      if subtitle:
-         subtitle = "<h2>%s</h2>" % subtitle
-
-      body = parts["body"]
-      parts = []
-      for part in (title, subtitle, body):
-          if not part:
-              continue
-          if isinstance(part, unicode):
-              part = part.encode('utf-8')
-          parts.append(part)
-      return "\n\n".join(parts)
-
-   def mkd2html(self):
-      from markdown import markdown
-      return markdown(self.body_mkd(), output_format="html")
-
-   def img_thumbnail_800_1024(self, img_name):
-      return """\
+    def __init__(self, *args, **kw):
+        if not hasattr(self, "_fileBaseName"):
+            self._fileDirName, self._fileBaseName = os.path.split(os.path.abspath(self._CHEETAH_src))
+        Template.__init__(self, *args, **kw)
+        directories = self._fileDirName.split('/')[full_dirs:] # remove directories up to "./files"
+        dirs_to_root = len(directories)
+        if dirs_to_root:
+            root = "../"*dirs_to_root
+        else:
+            root = ''
+        self.root = root
+        path = '/'.join(directories) + '/' + \
+            self._fileBaseName.replace(".tmpl", ".html")
+        if path[0] != '/': path = '/' + path
+        self.path = path
+
+    def copyright(self, start_year):
+        this_year = time.localtime()[0]
+        if start_year >= this_year:
+            return this_year
+        if start_year == this_year - 1:
+            return "%s, %s" % (start_year, this_year)
+        return "%s-%s" % (start_year, this_year)
+
+
+    def body(self):
+        if hasattr(self, "body_html"):
+            return self.body_html()
+        if hasattr(self, "body_text"):
+            return self.text2html()
+        if hasattr(self, "body_rst"):
+            return self.rst2html()
+        if hasattr(self, "body_mkd"):
+            return self.mkd2html()
+
+    def text2html(self):
+        body = re.sub(url_re, _url2href, self.body_text())
+
+        paragraphs = body.split("\n\n")
+
+        new_paras = []
+        for p in paragraphs:
+            if PY2 and not isinstance(p, bytes):
+                p = p.encode('utf-8')
+            parts = p.split("\n  ")
+            parts[0] = parts[0].strip()
+            new_paras.append('\n</p>\n<p>\n'.join(parts))
+
+        if self.Title:
+            title = "<h1>%s</h1>\n\n" % self.Title
+        else:
+            title = ''
+
+        body = '\n</p>\n\n<p class="head">\n'.join(new_paras)
+        return "%s<p>%s</p>" % (title, body)
+
+    def rst2html(self):
+        from docutils.core import publish_parts
+
+        parts = publish_parts(self.body_rst(), writer_name="html")
+
+        title = parts["title"] or self.Title
+        if title:
+            title = "<h1>%s</h1>" % title
+
+        subtitle = parts["subtitle"]
+        if subtitle:
+            subtitle = "<h2>%s</h2>" % subtitle
+
+        body = parts["body"]
+        parts = []
+        for part in (title, subtitle, body):
+             if not part:
+                  continue
+             if PY2 and not isinstance(part, bytes):
+                  part = part.encode('utf-8')
+             parts.append(part)
+        return "\n\n".join(parts)
+
+    def mkd2html(self):
+        from markdown import markdown
+        return markdown(self.body_mkd(), output_format="html")
+
+    def img_thumbnail_800_1024(self, img_name):
+        return """\
 <img src="%(img_name)s-thumbnail.jpg" alt="%(img_name)s-thumbnail.jpg" /><br />
 <a href="%(img_name)s-800x600.jpg">800x600</a>, <a href="%(img_name)s-1024x800.jpg">1024x800</a>""" % {"img_name": img_name}
 
-   def wikipedia(self, query):
-      return "https://en.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+    def wikipedia(self, query):
+        return "https://en.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
 
-   def wikipedia_ru(self, query):
-      return "https://ru.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+    def wikipedia_ru(self, query):
+        return "https://ru.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
 
-   def startpage(self, query):
-       return "https://startpage.com/do/search?q=%s" % quote_string(query)
+    def startpage(self, query):
+         return "https://startpage.com/do/search?q=%s" % quote_string(query)
 
-   search = startpage
+    search = startpage
 
-   def nigma(self, query):
-       return "http://www.nigma.ru/index.php?s=%s" % quote_string(query)
+    def nigma(self, query):
+         return "http://www.nigma.ru/index.php?s=%s" % quote_string(query)
 
-   def yandex(self, query):
-      return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" % quote_string(query, "cp1251")
+    def yandex(self, query):
+        return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" % quote_string(query, "cp1251")
 
-   def google(self, query):
-      return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
+    def google(self, query):
+        return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
 
-   def google_ru(self, query):
-      return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
+    def google_ru(self, query):
+        return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
 
-   def lurkmoar(self, query):
-       return "https://lurkmore.to/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+    def lurkmoar(self, query):
+         return "https://lurkmore.to/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
 
 def quote_string(s, to_encoding="utf-8", ext_safe=''):
-   return urllib.quote(unicode(s, "utf-8").encode(to_encoding), '/' + ext_safe)
+    if isinstance(s, bytes):
+        s = s.decode("utf-8")
+        if PY2:
+            s = s.encode(to_encoding)
+    return url_quote(s, '/' + ext_safe)
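After this change quote_string() accepts either bytes or text: byte strings are decoded from UTF-8 first and, on Python 2 only, re-encoded to the target charset before being passed to the version-appropriate quote function. A small usage sketch (values are illustrative):

    from phd import quote_string

    # Both spellings are expected to yield the same percent-encoded result
    # on Python 2 and Python 3:
    print(quote_string('Hello world'))    # text input  -> Hello%20world
    print(quote_string(b'Hello world'))   # bytes input -> Hello%20world
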
diff --git a/phd_site.tmpl b/phd_site.tmpl
index d176f0ee6ee6bd787e1f4d85c0be39a879f4e7b3..0388f0bf8b86c863e4e7b22e6daa9bf982fee006 100644 (file)
--- a/phd_site.tmpl
+++ b/phd_site.tmpl
@@ -120,7 +120,8 @@ $body
 #end def
 #if $Tag
 <hr width="90%">
-#if isinstance($Tag, basestring)
+#from Cheetah.compat import string_type
+#if isinstance($Tag, string_type)
 #if $path.startswith("/Russian/")
 Тег:
 #else
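Cheetah.compat.string_type stands in for the Python-2-only basestring here, so the template can still distinguish a single tag from a sequence of tags on either version. The same check outside the template (a plain-Python sketch, not template code):

    from Cheetah.compat import string_type   # basestring on Python 2, str on Python 3

    def as_tag_tuple(tag_or_tags):
        # Accept one tag or an iterable of tags, always return a tuple.
        if isinstance(tag_or_tags, string_type):
            return (tag_or_tags,)
        return tuple(tag_or_tags)
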
diff --git a/reindex_blog.py b/reindex_blog.py
index 33a5736dbbc1c022e04a58a65dd9ee9d167ef3e3..c2a87c603062d38d493ed1bef8031b88413ff47a 100755 (executable)
--- a/reindex_blog.py
+++ b/reindex_blog.py
@@ -2,9 +2,10 @@
 # -*- coding: koi8-r -*-
 
 __author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2014 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
 
 import sys, os
+from Cheetah.compat import string_type
 
 blog_data_root = sys.argv[1]
 blog_root = sys.argv[2]
@@ -54,7 +55,7 @@ for dirpath, dirs, files in os.walk(blog_root):
       lead = template.Lead.decode('utf-8').encode('koi8-r')
 
       tags = template.Tag
-      if isinstance(tags, basestring):
+      if isinstance(tags, string_type):
          tags = (tags,)
       tags = [tag.decode('utf-8').encode('koi8-r') for tag in tags]
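The unchanged context lines around this hunk still transcode titles, leads and tags with .decode('utf-8').encode('koi8-r'), which presumes UTF-8 byte strings as input. A version-tolerant helper for that step might look like this (a sketch under the assumption that values may arrive as either bytes or text; it is not code from the commit):

    def to_koi8(value):
        # Mirror the .decode('utf-8').encode('koi8-r') chain, but accept
        # text as well as UTF-8 bytes.
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        return value.encode('koi8-r')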