# This file is a part of phdru.name homepage/blog/news generator scripts.
# __author__ = "Oleg Broytman <phd@phdru.name>"
-# __copyright__ = "Copyright (C) 2006-2016 PhiloSoft Design"
+# __copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
.SUFFIXES: # Clear the suffix list
%.py: %.tmpl
- umask 022; cheetah compile --nobackup $< && compyle $@
+ umask 022; cheetah compile --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup $< && compyle $@
%.html: %.tmpl
- umask 022; PYTHONPATH=. cheetah fill --nobackup --stdout $< | iconv -f utf-8 > $@
+ umask 022; PYTHONPATH=. PYTHONIOENCODING=koi8-r:replace cheetah fill --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup --stdout $< | iconv -f utf-8 -t koi8-r >$@
.PHONY: fast
umask 022
PYTHONPATH=.
-export PYTHONPATH
+PYTHONIOENCODING=koi8-r:replace
+export PYTHONPATH PYTHONIOENCODING
if [ phd_site.tmpl -nt phd_site.py ]; then
- cheetah compile --nobackup phd_site.tmpl && compyle phd_site.py
+ cheetah compile --encoding=koi8-r --settings='encoding="koi8-r"' --nobackup phd_site.tmpl &&
+ compyle phd_site.py
fi &&
for tmpl in "$@"; do
dir="`dirname \"$tmpl\"`"
outfile="`basename \"$tmpl\" .tmpl`.html"
echo "$tmpl => $dir/$outfile"
- cheetah fill --nobackup --stdout "$tmpl" | iconv -f utf-8 >"$dir/$outfile" || exit 1
+ cheetah fill --encoding=koi8-r --settings='encoding="koi8-r"' \
+ --nobackup --stdout "$tmpl" | iconv -f utf-8 -t koi8-r >"$dir/$outfile" || exit 1
done
+
# -*- coding: koi8-r -*-
__author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2014 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
import sys, os
from news import get_news, write_if_changed
+from atom_10 import atom_10
+from rss_20 import rss_20
+
lang = sys.argv[1]
root = sys.argv[2]
""")
for item in news_items:
- new_text.append(' <li><a href="%s">%s - %s</a></li>\n' % (item.rel_link, item.date, item.title))
+ new_text.append(' <li><a href="%s">%s - %s</a></li>\n'
+ % (item.rel_link, item.date, item.title))
new_text.append("""\
</ul>
""")
if lang == "en":
- new_text.append("""\
+ new_text.append("""\
News are also available in
<A HREF="atom_10.xml">Atom 1.0 <img src="Graphics/atom_10.jpg" border=0></A>
and <A HREF="rss_20.xml">RSS 2.0 <img src="Graphics/rss_20.jpg" border=0></A>
""")
elif lang == "ru":
- new_text.append("""\
+ new_text.append("""\
Новостевая лента в форматах
<A HREF="atom_10.xml">Atom 1.0 <img src="../Graphics/atom_10.jpg" border=0></A>
и <A HREF="rss_20.xml">RSS 2.0 <img src="../Graphics/rss_20.jpg" border=0></A>.
write_if_changed(os.path.join(root, "news.tmpl"), ''.join(new_text))
-from atom_10 import atom_10
-from rss_20 import rss_20
-
namespace = {
- "title": "Oleg Broytman's Personal Page - News",
- "baseURL": "http://phdru.name/",
- "indexFile": "news.html",
- "description": "",
- "lang": lang,
- "author": "Oleg Broytman",
- "email": "phd@phdru.name",
- "generator": os.path.basename(sys.argv[0]),
- "posts": news_items,
+ "title": "Oleg Broytman's Personal Page - News",
+ "baseURL": "http://phdru.name/",
+ "indexFile": "news.html",
+ "description": "",
+ "lang": lang,
+ "author": "Oleg Broytman",
+ "email": "phd@phdru.name",
+ "generator": os.path.basename(sys.argv[0]),
+ "posts": news_items,
}
if lang == "ru":
- namespace["title"] = "Oleg Broytman's Personal Page - Russian News"
- namespace["baseURL"] = baseURL = "http://phdru.name/Russian/"
- for item in news_items:
- item.baseURL = baseURL
- item.title = item.title.decode('koi8-r').encode('utf-8')
+ namespace["title"] = "Oleg Broytman's Personal Page - Russian News"
+ namespace["baseURL"] = baseURL = "http://phdru.name/Russian/"
+ for item in news_items:
+ item.baseURL = baseURL
+ if isinstance(item.title, bytes):
+ item.title = item.title.decode('koi8-r').encode('utf-8')
for item in news_items:
- href_parts = item.rel_link.split('/')
- if href_parts:
- if href_parts[0] == '.':
- category = "Home page"
- elif href_parts[0] == "..":
- category = href_parts[1]
- else:
- category = href_parts[0]
- if category: item.categoryList = [category]
-
-atom_tmpl = unicode(atom_10(searchList=[namespace])).encode('koi8-r')
+ href_parts = item.rel_link.split('/')
+ if href_parts:
+ if href_parts[0] == '.':
+ category = "Home page"
+ elif href_parts[0] == "..":
+ category = href_parts[1]
+ else:
+ category = href_parts[0]
+ if category: item.categoryList = [category]
+
+atom_tmpl = atom_10(searchList=[namespace])
+rss_tmpl = rss_20(searchList=[namespace])
+
+try:
+ unicode
+except NameError: # PY3
+ atom_tmpl = str(atom_tmpl)
+ rss_tmpl = str(rss_tmpl)
+else:
+ atom_tmpl = unicode(atom_tmpl).encode('koi8-r')
+ rss_tmpl = unicode(rss_tmpl).encode('koi8-r')
+
write_if_changed(os.path.join(root, "atom_10.xml"), atom_tmpl)
-rss_tmpl = unicode(rss_20(searchList=[namespace])).encode('koi8-r')
write_if_changed(os.path.join(root, "rss_20.xml"), rss_tmpl)
"""News"""
+from __future__ import print_function
+
__author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2012 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
__docformat__ = "epytext en"
__all__ = ["get_news", "write_if_changed"]
from datetime import date
-from urlparse import urljoin
+try:
+ from urlparse import urljoin
+except ImportError:
+ from urllib.parse import urljoin
+
+try:
+ unicode
+except NameError: # PY3
+ std_open = open
+ def open(fname, mode):
+ return std_open(fname, mode, encoding='koi8-r')
class NewsItem(object):
- baseURL = "http://phdru.name/"
- excerpt = None
- content = None
- categoryList = []
+ baseURL = "http://phdru.name/"
+ excerpt = None
+ content = None
+ categoryList = []
- def __init__(self, date, title, rel_link):
- self.date = date
- self.title = title
- self.rel_link = rel_link
+ def __init__(self, date, title, rel_link):
+ self.date = date
+ self.title = title
+ self.rel_link = rel_link
- def URL(self):
- return urljoin(self.baseURL, self.rel_link)
+ def URL(self):
+ return urljoin(self.baseURL, self.rel_link)
- def rfc822_date(self):
- y, m, d = self.date.split('-')
- d = date(int(y), int(m), int(d))
- return d.strftime("%a, %d %b %Y %H:%M:%S +0300")
+ def rfc822_date(self):
+ y, m, d = self.date.split('-')
+ d = date(int(y), int(m), int(d))
+ return d.strftime("%a, %d %b %Y %H:%M:%S +0300")
def get_news_header(lang):
- news_tmpl_file = open("news.tmpl-%s" % lang, 'r')
- header = news_tmpl_file.read()
- news_tmpl_file.close()
+ news_tmpl_file = open("news.tmpl-%s" % lang, 'r')
+ header = news_tmpl_file.read()
+ news_tmpl_file.close()
- return header
+ return header
def get_news_items(lang):
- news_items_file = open("news_%s" % lang, 'r')
- news_text = news_items_file.read()
- news_items_file.close()
+ news_items_file = open("news_%s" % lang, 'r')
+ news_text = news_items_file.read()
+ news_items_file.close()
- news_items = []
- for line in news_text.split('\n'):
- if not line: continue
- date, rel_link, title = line.split(None, 2)
- news_items.append(NewsItem(date, title, rel_link))
+ news_items = []
+ for line in news_text.split('\n'):
+ if not line: continue
+ date, rel_link, title = line.split(None, 2)
+ news_items.append(NewsItem(date, title, rel_link))
- return news_items
+ return news_items
def get_news(lang):
- """Get news
+ """Get news
- @param lang: langauge
- @type lang: a string 'en' or 'ru'
- @return: a tuple of (lnaguge-dependent header, a list of new items)
+    @param lang: language
+    @type lang: a string 'en' or 'ru'
+    @return: a tuple of (language-dependent header, a list of news items)
- """
- return get_news_header(lang), get_news_items(lang)
+ """
+ return get_news_header(lang), get_news_items(lang)
def write_if_changed(filename, new_text):
- try:
- infile = open(filename, 'r')
- old_text = infile.read()
- infile.close()
- except IOError:
- old_text = None
-
- if old_text <> new_text:
- print "Writing", filename
- outfile = open(filename, 'w')
- outfile.write(new_text)
- outfile.close()
+ try:
+ infile = open(filename, 'r')
+ old_text = infile.read()
+ infile.close()
+ except IOError:
+ old_text = None
+
+ if old_text != new_text:
+ print("Writing", filename)
+ outfile = open(filename, 'w')
+ outfile.write(new_text)
+ outfile.close()
-import os, re, time, urllib
+import os, re, time
+try:
+ from urllib import quote as url_quote
+except ImportError:
+ from urllib.parse import quote as url_quote
from Cheetah.Template import Template
+from Cheetah.compat import PY2
url_re = r"(((https?|ftp|gopher|telnet)://|(mailto|file|news|about|ed2k|irc|sip|magnet):)[^' \t<>\"]+|(www|web|w3)[A-Za-z0-9_-]*\.[A-Za-z0-9._-]+\.[^' \t<>\"]+)[A-Za-z0-9/]"
def _url2href(match):
- url = match.group(0)
- return '<a href="%s">%s</a>' % (url, url)
+ url = match.group(0)
+ return '<a href="%s">%s</a>' % (url, url)
full_dirs = len(os.getcwd().split('/')) + 1
class phd(Template):
- def __init__(self, *args, **kw):
- if not hasattr(self, "_fileBaseName"):
- self._fileDirName, self._fileBaseName = os.path.split(os.path.abspath(self._CHEETAH_src))
- Template.__init__(self, *args, **kw)
- directories = self._fileDirName.split('/')[full_dirs:] # remove directories up to "./files"
- dirs_to_root = len(directories)
- if dirs_to_root:
- root = "../"*dirs_to_root
- else:
- root = ''
- self.root = root
- path = '/'.join(directories) + '/' + \
- self._fileBaseName.replace(".tmpl", ".html")
- if path[0] <> '/': path = '/' + path
- self.path = path
-
- def copyright(self, start_year):
- this_year = time.localtime()[0]
- if start_year >= this_year:
- return this_year
- if start_year == this_year - 1:
- return "%s, %s" % (start_year, this_year)
- return "%s-%s" % (start_year, this_year)
-
-
- def body(self):
- if hasattr(self, "body_html"):
- return self.body_html().encode('utf-8')
- if hasattr(self, "body_text"):
- return self.text2html()
- if hasattr(self, "body_rst"):
- return self.rst2html()
- if hasattr(self, "body_mkd"):
- return self.mkd2html()
-
- def text2html(self):
- body = re.sub(url_re, _url2href, self.body_text())
-
- paragraphs = body.split("\n\n")
-
- new_paras = []
- for p in paragraphs:
- if isinstance(p, unicode):
- p = p.encode('utf-8')
- parts = p.split("\n ")
- parts[0] = parts[0].strip()
- new_paras.append('\n</p>\n<p>\n'.join(parts))
-
- if self.Title:
- title = "<h1>%s</h1>\n\n" % self.Title
- else:
- title = ''
-
- body = '\n</p>\n\n<p class="head">\n'.join(new_paras)
- return "%s<p>%s</p>" % (title, body)
-
- def rst2html(self):
- from docutils.core import publish_parts
-
- parts = publish_parts(self.body_rst(), writer_name="html")
-
- title = parts["title"] or self.Title
- if title:
- title = "<h1>%s</h1>" % title
-
- subtitle = parts["subtitle"]
- if subtitle:
- subtitle = "<h2>%s</h2>" % subtitle
-
- body = parts["body"]
- parts = []
- for part in (title, subtitle, body):
- if not part:
- continue
- if isinstance(part, unicode):
- part = part.encode('utf-8')
- parts.append(part)
- return "\n\n".join(parts)
-
- def mkd2html(self):
- from markdown import markdown
- return markdown(self.body_mkd(), output_format="html")
-
- def img_thumbnail_800_1024(self, img_name):
- return """\
+ def __init__(self, *args, **kw):
+ if not hasattr(self, "_fileBaseName"):
+ self._fileDirName, self._fileBaseName = os.path.split(os.path.abspath(self._CHEETAH_src))
+ Template.__init__(self, *args, **kw)
+ directories = self._fileDirName.split('/')[full_dirs:] # remove directories up to "./files"
+ dirs_to_root = len(directories)
+ if dirs_to_root:
+ root = "../"*dirs_to_root
+ else:
+ root = ''
+ self.root = root
+ path = '/'.join(directories) + '/' + \
+ self._fileBaseName.replace(".tmpl", ".html")
+ if path[0] != '/': path = '/' + path
+ self.path = path
+
+ def copyright(self, start_year):
+ this_year = time.localtime()[0]
+ if start_year >= this_year:
+ return this_year
+ if start_year == this_year - 1:
+ return "%s, %s" % (start_year, this_year)
+ return "%s-%s" % (start_year, this_year)
+
+
+ def body(self):
+ if hasattr(self, "body_html"):
+ return self.body_html()
+ if hasattr(self, "body_text"):
+ return self.text2html()
+ if hasattr(self, "body_rst"):
+ return self.rst2html()
+ if hasattr(self, "body_mkd"):
+ return self.mkd2html()
+
+ def text2html(self):
+ body = re.sub(url_re, _url2href, self.body_text())
+
+ paragraphs = body.split("\n\n")
+
+ new_paras = []
+ for p in paragraphs:
+ if PY2 and not isinstance(p, bytes):
+ p = p.encode('utf-8')
+ parts = p.split("\n ")
+ parts[0] = parts[0].strip()
+ new_paras.append('\n</p>\n<p>\n'.join(parts))
+
+ if self.Title:
+ title = "<h1>%s</h1>\n\n" % self.Title
+ else:
+ title = ''
+
+ body = '\n</p>\n\n<p class="head">\n'.join(new_paras)
+ return "%s<p>%s</p>" % (title, body)
+
+ def rst2html(self):
+ from docutils.core import publish_parts
+
+ parts = publish_parts(self.body_rst(), writer_name="html")
+
+ title = parts["title"] or self.Title
+ if title:
+ title = "<h1>%s</h1>" % title
+
+ subtitle = parts["subtitle"]
+ if subtitle:
+ subtitle = "<h2>%s</h2>" % subtitle
+
+ body = parts["body"]
+ parts = []
+ for part in (title, subtitle, body):
+ if not part:
+ continue
+ if PY2 and not isinstance(part, bytes):
+ part = part.encode('utf-8')
+ parts.append(part)
+ return "\n\n".join(parts)
+
+ def mkd2html(self):
+ from markdown import markdown
+ return markdown(self.body_mkd(), output_format="html")
+
+ def img_thumbnail_800_1024(self, img_name):
+ return """\
<img src="%(img_name)s-thumbnail.jpg" alt="%(img_name)s-thumbnail.jpg" /><br />
<a href="%(img_name)s-800x600.jpg">800x600</a>, <a href="%(img_name)s-1024x800.jpg">1024x800</a>""" % {"img_name": img_name}
- def wikipedia(self, query):
- return "https://en.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+ def wikipedia(self, query):
+ return "https://en.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
- def wikipedia_ru(self, query):
- return "https://ru.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+ def wikipedia_ru(self, query):
+ return "https://ru.wikipedia.org/wiki/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
- def startpage(self, query):
- return "https://startpage.com/do/search?q=%s" % quote_string(query)
+ def startpage(self, query):
+ return "https://startpage.com/do/search?q=%s" % quote_string(query)
- search = startpage
+ search = startpage
- def nigma(self, query):
- return "http://www.nigma.ru/index.php?s=%s" % quote_string(query)
+ def nigma(self, query):
+ return "http://www.nigma.ru/index.php?s=%s" % quote_string(query)
- def yandex(self, query):
- return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" % quote_string(query, "cp1251")
+ def yandex(self, query):
+ return "http://www.yandex.ru/yandsearch?text=%s&rpt=rad" % quote_string(query, "cp1251")
- def google(self, query):
- return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
+ def google(self, query):
+ return "http://www.google.com/search?hl=en&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
- def google_ru(self, query):
- return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
+ def google_ru(self, query):
+ return "http://www.google.ru/search?hl=ru&ie=utf-8&oe=utf-8&q=%s" % quote_string(query)
- def lurkmoar(self, query):
- return "https://lurkmore.to/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
+ def lurkmoar(self, query):
+ return "https://lurkmore.to/%s" % quote_string(query.replace(' ', '_'), ext_safe=',')
def quote_string(s, to_encoding="utf-8", ext_safe=''):
- return urllib.quote(unicode(s, "utf-8").encode(to_encoding), '/' + ext_safe)
+ if isinstance(s, bytes):
+ s = s.decode("utf-8")
+ if PY2:
+ s = s.encode(to_encoding)
+ return url_quote(s, '/' + ext_safe)
#end def
#if $Tag
<hr width="90%">
-#if isinstance($Tag, basestring)
+#from Cheetah.compat import string_type
+#if isinstance($Tag, string_type)
#if $path.startswith("/Russian/")
Тег:
#else
# -*- coding: koi8-r -*-
__author__ = "Oleg Broytman <phd@phdru.name>"
-__copyright__ = "Copyright (C) 2006-2014 PhiloSoft Design"
+__copyright__ = "Copyright (C) 2006-2017 PhiloSoft Design"
import sys, os
+from Cheetah.compat import string_type
blog_data_root = sys.argv[1]
blog_root = sys.argv[2]
lead = template.Lead.decode('utf-8').encode('koi8-r')
tags = template.Tag
- if isinstance(tags, basestring):
+ if isinstance(tags, string_type):
tags = (tags,)
tags = [tag.decode('utf-8').encode('koi8-r') for tag in tags]