import os, re, time, urllib
-from HTMLParser import HTMLParseError
from Cheetah.Template import Template
-from m_lib.net.www.html import HTMLParser as _HTMLParser
url_re = r"(((https?|ftp|gopher|telnet)://|(mailto|file|news|about|ed2k|irc|sip|magnet):)[^' \t<>\"]+|(www|web|w3)[A-Za-z0-9_-]*\.[A-Za-z0-9._-]+\.[^' \t<>\"]+)[A-Za-z0-9/]"
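# Doctest-style sketch (illustrative URL, not from the original file):
# the pattern catches scheme-prefixed links as well as bare www-style hosts.
#   >>> import re
#   >>> re.search(url_re, "see https://phdru.name/Russian/blog/ now").group()
#   'https://phdru.name/Russian/blog/'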
return "%s, %s" % (start_year, this_year)
return "%s-%s" % (start_year, this_year)
+
def body(self):
    if hasattr(self, "body_html"):
        body = self.body_html()
    if hasattr(self, "body_text"):
        body = self.text2html()
    if hasattr(self, "body_rst"):
        body = self.rst2html()
-    self.Body = body
    return body
def text2html(self):
    parts = [part for part in (title, subtitle, body) if part]
    return "\n\n".join(parts)
-def get_first_p(self):
-    parser = HTMLParser()
-
-    try:
-        parser.feed(self.body())
-    except (HTMLParseError, HTMLHeadDone):
-        pass
-
-    try:
-        parser.close()
-    except (HTMLParseError, HTMLHeadDone):
-        pass
-
-    return parser.first_p
def img_thumbnail_800_1024(self, img_name):
return """\
def quote_string(s, to_encoding="utf-8", ext_safe=''):
    return urllib.quote(unicode(s, "koi8-r").encode(to_encoding), '/' + ext_safe)
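# Doctest-style sketch: quote_string() expects koi8-r bytes; here
# '\xc2\xcc\xcf\xc7' is "блог" in koi8-r, recoded to utf-8 and percent-quoted.
#   >>> quote_string('\xc2\xcc\xcf\xc7')
#   '%D0%B1%D0%BB%D0%BE%D0%B3'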
-
-
-class HTMLHeadDone(Exception): pass
-
-class HTMLParser(_HTMLParser):
-    def __init__(self, charset=None):
-        _HTMLParser.__init__(self)
-        self.first_p = None
-
-    def start_p(self, attrs):
-        self.accumulator = '<p>'
-
-    def end_p(self):
-        self.first_p = self.accumulator + '</p>'
-        raise HTMLHeadDone()
blog = {}
years = {}
-# excerpts and bodies are dictionaries mapping file => excerpt/body
+# bodies is a dictionary mapping file => body
-excerpts = {}
bodies = {}
# Walk the directory recursively
template = Template(file=fullpath)
title_parts = template.Title.split()
title = ' '.join(title_parts[6:])
- lead = getattr(template, "Lead", None)
+ lead = template.Lead
tags = template.Tag
if isinstance(tags, basestring):
file = file[:-len("tmpl")] + "html"
key = (year, month, day, file)
- excerpts[key] = template.get_first_p()
- bodies[key] = template.Body
+ bodies[key] = template.body()
# Need to save the blog?
if blog <> old_blog:
else:
new_text.append('\n<h2>%s %s</h2>' % (day, months_names_ru[int(month)]))
save_date = year, month, day
- if lead:
- lead = lead + ' '
- else:
- lead = ''
new_text.append('''
<p class="head">
%s<a href="%s">%s</a>.
</p>
-''' % (lead, href, title))
+''' % (lead+' ' if lead else '', href, title))
if level == 0:
new_text.append("""
count = 0
for year, month, day, filename, title, lead in reversed(links):
- if lead:
- lead = lead + ' '
- else:
- lead = ''
link = "../%s/%s/%s/%s" % (year, month, day, filename)
- item_text = """<li><a href="%s">%s/%s/%s: %s%s</a></li>""" % (link, year, month, day, lead, title)
+ item_text = """<li><a href="%s">%s/%s/%s: %s%s</a></li>""" % (link, year, month, day, lead+' ' if lead else '', title)
count += 1
if count <= 5:
write_if_changed(os.path.join(blog_root, "tags", "index.tmpl"), ''.join(new_text))
+from HTMLParser import HTMLParseError
+import cgi
+from urlparse import urljoin
+from m_lib.net.www.html import HTMLParser as _HTMLParser
+
+class HTMLDone(Exception): pass
+
+
+class FirstPHTMLParser(_HTMLParser):
+    def __init__(self):
+        _HTMLParser.__init__(self)
+        self.first_p = None
+
+    def start_p(self, attrs):
+        self.accumulator = '<p>'
+
+    def end_p(self):
+        self.first_p = self.accumulator + '</p>'
+        raise HTMLDone()
+
+def get_first_p(body):
+    parser = FirstPHTMLParser()
+
+    try:
+        parser.feed(body)
+    except (HTMLParseError, HTMLDone):
+        pass
+
+    try:
+        parser.close()
+    except (HTMLParseError, HTMLDone):
+        pass
+
+    return parser.first_p
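+# Doctest-style sketch, assuming the m_lib base parser collects character
+# data into self.accumulator between start_p and end_p:
+#   >>> get_first_p('<p>First para.</p><p>Second.</p>')
+#   '<p>First para.</p>'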
+
+
+class AbsURLHTMLParser(_HTMLParser):
+    def __init__(self, base):
+        _HTMLParser.__init__(self)
+        self.base = base
+
+    def start_a(self, attrs):
+        self.accumulator += '<a'
+        for attrname, value in attrs:
+            value = cgi.escape(value, True)
+            if attrname == 'href':
+                self.accumulator += ' href="%s"' % urljoin(self.base, value)
+            else:
+                self.accumulator += ' %s="%s"' % (attrname, value)
+        self.accumulator += '>'
+
+    def end_a(self):
+        self.accumulator += '</a>'
+
+def absolute_urls(body, base):
+    parser = AbsURLHTMLParser(base)
+
+    try:
+        parser.feed(body)
+    except HTMLParseError:
+        pass
+
+    try:
+        parser.close()
+    except HTMLParseError:
+        pass
+
+    return parser.accumulator
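+# Doctest-style sketch (hypothetical base URL), assuming non-anchor character
+# data passes through self.accumulator unchanged:
+#   >>> absolute_urls('<a href="../2023/">last year</a>',
+#   ...               'https://phdru.name/Russian/blog/2024/')
+#   '<a href="https://phdru.name/Russian/blog/2023/">last year</a>'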
+
+
from atom_10 import atom_10
from rss_20 import rss_20
from news import NewsItem
items = []
for item in tuple(reversed(all_titles_tags))[:10]:
year, month, day, file, title, lead, tags = item
- if lead:
- lead = lead + ' '
- else:
- lead = ''
+ url_path = "%s/%s/%s/%s" % (year, month, day, file)
item = NewsItem(
"%s-%s-%s" % (year, month, day),
- "%s%s" % (lead, title),
- "%s/%s/%s/%s" % (year, month, day, file)
- )
+ "%s%s" % (lead+' ' if lead else '', title),
+ url_path)
items.append(item)
item.baseURL = baseURL
item.categoryList = tags
- item.excerpt = excerpts[(year, month, day, file)]
- item.body = bodies[(year, month, day, file)]
+ body = bodies[(year, month, day, file)]
+ body = absolute_urls(body, baseURL + url_path)
+ item.body = body
+ item.excerpt = get_first_p(body)
namespace = {
"title": "Oleg Broytman's blog",