#!/usr/bin/env python3
"""A simple blog generator with Pandoc as backend."""
# TODO: put blog configurations in a config file
# TODO: auto retouch: prompt for git commit amend after touching
# (display commit message to avoid amending the wrong commit)
# pylint: disable=too-many-lines
import argparse
from contextlib import contextmanager
import copy
import datetime
import email.utils
import fileinput
import io
import http.client
import http.server
import multiprocessing
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import urllib.parse
import lxml.etree as ET
import bs4
import colorama
import dateutil.parser
import dateutil.tz
############################# BLOG CONFIGURATIONS ##############################
# Safe to customize
BLOG_HOME = "http://zmwangx.github.io/"
BLOG_TITLE = "dl? cmplnts?"
BLOG_DESCRIPTION = "Zhiming Wang's personal blog"
LANGUAGE = "en-us"
AUTHOR = "Zhiming Wang"
AUTHOR_EMAIL = "zmwangx@gmail.com"
ATOM_ICON_PATH = "img/icon-400.png" # set to None to leave it out
RSS_ICON_PATH = "img/icon-100.png" # set to None to leave it out
RSS_ICON_WIDTH = 100
RSS_ICON_HEIGHT = 100
########################## END OF BLOG CONFIGURATIONS ##########################
########################### GENERATOR CONFIGURATIONS ###########################
# Do not touch unless you know what you are doing.
GENERATOR_NAME = "pyblog"
GENERATOR_HOME_PAGE = "https://github.com/zmwangx/zmwangx.github.io"
# directory layout: everything lives under the directory containing this script
ROOTDIR = os.path.dirname(os.path.realpath(__file__))
SOURCEDIR = os.path.join(ROOTDIR, "source")  # Markdown sources
POSTSDIR = os.path.join(SOURCEDIR, "blog")  # blog post sources
INDEXMD = os.path.join(SOURCEDIR, "index.md")  # source of the home page
GENERATORSOURCE = os.path.join(ROOTDIR, "pyblog")  # this script (mtime checked)
TEMPLATEDIR = os.path.join(ROOTDIR, "templates")
HTMLTEMPLATE = os.path.join(TEMPLATEDIR, "template.html")  # pandoc template
BUILDDIR = os.path.join(ROOTDIR, "build")  # the generated site
ATOM = os.path.join(BUILDDIR, "atom.xml")
RSS = os.path.join(BUILDDIR, "rss.xml")
INDEXHTML = os.path.join(BUILDDIR, "index.html")
# cap on the number of entries included in atom.xml and rss.xml
FEED_MAX_ENTRIES = 20
# NOTE(review): CODE_LINE_HEIGHT is not referenced anywhere in this file --
# presumably consumed by templates or stylesheets; confirm before removing.
CODE_LINE_HEIGHT = 18
####################### END OF GENERATOR CONFIGURATIONS ########################
# Global foreground ANSI escape codes. Populated by init_colorama() on entry
# and reset to empty strings on its exit, so interpolating them is always safe.
BLACK = ""
BLUE = ""
CYAN = ""
GREEN = ""
MAGENTA = ""
RED = ""
WHITE = ""
YELLOW = ""
RESET = ""
@contextmanager
def init_colorama():
    """Set, then clear, the global foreground-modifying ANSI codes.

    On entry, populates the module-level color globals (BLACK, BLUE,
    CYAN, GREEN, MAGENTA, RED, WHITE, YELLOW, and RESET) from
    colorama.Fore; on exit, resets them all to the empty string and
    deinitializes colorama.
    """
    # pylint: disable=invalid-name
    colorama.init()
    # Assign through globals() instead of exec'ing dynamically built
    # statements -- same effect, no string evaluation.
    for color, ansi in colorama.Fore.__dict__.items():
        globals()[color] = ansi
    yield
    for color in colorama.Fore.__dict__:
        globals()[color] = ""
    colorama.deinit()
def current_datetime():
    """Return the current datetime, complete with tzinfo.

    Precision is one second (the epoch timestamp is rounded, dropping
    microseconds). Timezone is the local timezone.

    Returns
    -------
    datetime.datetime
        Timezone-aware datetime in the local timezone.
    """
    # .astimezone() with no argument attaches the local timezone to a
    # naive datetime, so third-party dateutil.tz.tzlocal() is not needed.
    return datetime.datetime.fromtimestamp(round(time.time())).astimezone()
class AtomFeed(object):
    """Class for storing atom:feed data and metadata.

    See RFC 4287: https://tools.ietf.org/html/rfc4287.
    """
    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None            # atom:author
        self.generator = None         # atom:generator, optional
        self.icon = None              # atom:icon, optional
        self.logo = None              # atom:logo, optional
        self.id_text = None           # atom:id, just use URI
        self.id = None                # atom:id
        self.links = []               # list of atom:link
        self.title_text = None        # the text of atom:title
        self.title = None             # atom:title
        self.subtitle_text = None     # the text of atom:subtitle
        self.subtitle = None          # atom:subtitle
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None           # atom:updated
        self.entries = []             # list of atom:entry, in reverse time order
        self.feed = None              # atom:feed, assembled

    def assemble_feed(self):
        """Assemble atom:feed from the individual elements.

        Optional elements that were never set (None) are left out.
        """
        # pylint: disable=multiple-statements
        self.feed = ET.Element("feed", xmlns="http://www.w3.org/2005/Atom")
        self.feed.append(self.title)
        if self.subtitle is not None: self.feed.append(self.subtitle)
        for link in self.links:
            self.feed.append(link)
        self.feed.append(self.updated)
        self.feed.append(self.id)
        self.feed.append(self.author)
        if self.icon is not None: self.feed.append(self.icon)
        # BUGFIX: this branch used to append self.icon instead of self.logo
        if self.logo is not None: self.feed.append(self.logo)
        if self.generator is not None: self.feed.append(self.generator)
        # include at most FEED_MAX_ENTRIES entries in the feed
        for entry in self.entries[:FEED_MAX_ENTRIES]:
            self.feed.append(entry.entry)

    def dump_feed(self):
        """Serialize atom:feed to an XML string, assembling it first if needed."""
        if self.feed is None:
            self.assemble_feed()
        return ET.tostring(self.feed).decode("utf-8")
class AtomEntry(object):
    """Container for the data and metadata of a single atom:entry."""
    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None            # atom:author
        self.id_text = None           # atom:id, just use URI
        self.id = None                # atom:id
        self.relpath = None           # HTML page path relative to home
        self.link = None              # atom:link
        self.title_text = None        # plain text title
        self.title = None             # atom:title
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None           # atom:updated
        self.content_html = None      # content as HTML markup
        self.content = None           # atom:content
        self.entry = None             # atom:entry, assembled

    def assemble_entry(self):
        """Assemble atom:entry from the individual elements."""
        entry = ET.Element("entry")
        for child in (self.title, self.link, self.updated,
                      self.id, self.author, self.content):
            entry.append(child)
        self.entry = entry

    def dump_entry(self):
        """Serialize atom:entry to an XML string, assembling it first if needed."""
        if self.entry is None:
            self.assemble_entry()
        return ET.tostring(self.entry).decode("utf-8")
class RssFeed(object):
    """Container for an RSS 2.0 feed.

    Spec: https://validator.w3.org/feed/docs/rss2.html.
    """
    # pylint: disable=too-many-instance-attributes

    REQUIRED_ELEMENTS = ["title", "link", "description"]
    OPTIONAL_ELEMENTS = ["language", "copyright", "managingEditor", "webMaster",
                         "pubDate", "lastBuildDate", "category", "generator",
                         "docs", "cloud", "ttl", "image", "textInput",
                         "skipHours", "skipDays"]

    def __init__(self):
        """Define available attributes."""
        self.rssurl = None    # the URL of the rss feed
        self.atomlink = None  # atom:link self-reference, assembled later
        # every channel sub-element starts out unset
        for element in self.REQUIRED_ELEMENTS + self.OPTIONAL_ELEMENTS:
            setattr(self, element, None)
        self.docs = ET.Element("docs")
        self.docs.text = "https://validator.w3.org/feed/docs/rss2.html"
        self.author_text = None       # "email (name)" string reused by items
        self.update_timestamp = None  # epoch seconds of the last build
        self.items = []               # list of RssItem, in reverse time order
        self.rss = None               # <rss> root element, assembled
        self.channel = None           # <channel> element, assembled

    def assemble_rss(self):
        """Assemble the RSS 2.0 feed element tree."""
        self.rss = ET.Element("rss", version="2.0",
                              nsmap={"atom": "http://www.w3.org/2005/Atom"})
        self.channel = ET.SubElement(self.rss, "channel")
        # https://validator.w3.org/feed/docs/warning/MissingAtomSelfLink.html
        self.atomlink = ET.SubElement(
            self.channel, "{http://www.w3.org/2005/Atom}link",
            href=self.rssurl, rel="self", type="application/rss+xml")
        for element in self.REQUIRED_ELEMENTS:
            self.channel.append(getattr(self, element))
        for element in self.OPTIONAL_ELEMENTS:
            attr = getattr(self, element)
            if attr is not None:
                self.channel.append(attr)
        # include at most FEED_MAX_ENTRIES items in the RSS feed
        for item in self.items[:FEED_MAX_ENTRIES]:
            self.channel.append(item.item)

    def dump_rss(self):
        """Serialize the RSS feed to an XML string, assembling it first if needed."""
        if self.rss is None:
            self.assemble_rss()
        return ET.tostring(self.rss).decode("utf-8")
class RssItem(object):
    """Container for a single RSS 2.0 item."""

    ELEMENTS = ["title", "link", "description", "author", "category", "comments",
                "enclosure", "guid", "pubDate", "source"]

    def __init__(self):
        """Define available attributes."""
        # every item sub-element starts out unset
        for element in self.ELEMENTS:
            setattr(self, element, None)
        self.timestamp = None  # epoch seconds of the publication date
        self.item = None       # <item> element, assembled

    def assemble_item(self):
        """Assemble the <item> element, skipping sub-elements never set."""
        item = ET.Element("item")
        for child in (getattr(self, element) for element in self.ELEMENTS):
            if child is not None:
                item.append(child)
        self.item = item

    def dump_item(self):
        """Serialize the RSS item to an XML string, assembling it first if needed."""
        if self.item is None:
            self.assemble_item()
        return ET.tostring(self.item).decode("utf-8")
def generate_index(feed):
    """Generate index.html from index.md and a table of contents.

    The TOC is assembled from feed.entries (which are in reverse
    chronological order), grouped into one table per year, then
    concatenated with index.md and run through pandoc.

    Parameters
    ----------
    feed : AtomFeed
    """
    sys.stderr.write("generating index.html\n")
    # NOTE(review): the markup literals in this function arrived garbled
    # (angle-bracket tags stripped); the HTML below is a reconstruction
    # (year heading followed by one table per year) -- confirm against
    # the site's expected TOC markup.
    # generate TOC
    tocbuff = io.StringIO()
    tocbuff.write('<div class="toc">\n')
    year = 10000  # will be larger than the latest year for quite a while
    # recall that entries are in reverse chronological order
    table_opened = False
    for entry in feed.entries:
        date = entry.updated_datetime
        if date.year < year:
            # close the previous year's table if there is one
            if table_opened:
                tocbuff.write(u'</table>\n')
            # write a new heading with the smaller year
            year = date.year
            tocbuff.write(u'\n<h2>{0}</h2>\n\n'.format(year))
            tocbuff.write(u'<table>\n')
            table_opened = True
        # write a new table row entry: date cell, then the linked title
        monthday = date.strftime("%b %d")
        tocbuff.write(u'<tr><td><time datetime="%s">%s</time></td>'
                      u'<td>[%s](%s)</td></tr>\n' %
                      (date.isoformat(), monthday, entry.title_text, entry.relpath))
    if table_opened:
        tocbuff.write(u'</table>\n')
    tocbuff.write('</div>\n')
    # create tempfile with index.md and the TOC concatenated, and
    # generate index.html from that
    # pylint: disable=invalid-name
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    with open(tmppath, 'w', encoding='utf-8') as tmpfile:
        if os.path.exists(INDEXMD):
            with open(INDEXMD, 'r', encoding='utf-8') as indexmd:
                tmpfile.write(u"%s\n\n\n\n" % indexmd.read())
        tmpfile.write("%s\n" % tocbuff.getvalue())
    tocbuff.close()
    pandoc_args = [
        "pandoc", tmppath,
        "--template", HTMLTEMPLATE,
        "--highlight-style=pygments",
        "-o", INDEXHTML,
    ]
    try:
        subprocess.check_call(pandoc_args)
    except subprocess.CalledProcessError:
        sys.stderr.write("error: failed to generate index.html\n")
    os.remove(tmppath)
def make_sitemap_url_element(link, updated=None, changefreq=None, priority=None):
    """Make a sitemap <url> element.

    Parameters
    ----------
    link : str or Element
        Either the URL itself, or an atom:link element whose href
        attribute holds the URL.
    updated : datetime.datetime or Element, optional
        Either the update time itself, or an atom:updated element whose
        text holds the time in ISO 8601 format.
    changefreq : {"always", "hourly", "daily", "weekly", "monthly", "yearly", "never"}, optional
    priority : {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1}, optional

    Returns
    -------
    Element
        The assembled <url> element.
    """
    urlelem = ET.Element("url")
    loc = ET.Element("loc")
    # Test for str rather than for the private lxml class ET._Element,
    # so the function works with any element-like object.
    loc.text = link if isinstance(link, str) else link.attrib["href"]
    urlelem.append(loc)
    if updated is not None:
        lastmod = ET.Element("lastmod")
        lastmod.text = (updated.isoformat()
                        if isinstance(updated, datetime.datetime)
                        else updated.text)
        urlelem.append(lastmod)
    if changefreq is not None:
        changefreq_elem = ET.Element("changefreq")
        changefreq_elem.text = changefreq
        urlelem.append(changefreq_elem)
    if priority is not None:
        priority_elem = ET.Element("priority")
        priority_elem.text = "%.1f" % priority
        urlelem.append(priority_elem)
    return urlelem
def generate_sitemap(feed):
    """Generate sitemap.xml in BUILDDIR.

    The sitemap covers the index page, every other top-level HTML page,
    and every blog entry in the feed.

    Parameters
    ----------
    feed : AtomFeed
    """
    sitemap = ET.Element("urlset", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
    # the index page
    sitemap.append(make_sitemap_url_element(BLOG_HOME, feed.updated, "daily", 1.0))
    # other top-level pages
    for name in os.listdir(BUILDDIR):
        if name == "index.html" or not name.endswith(".html"):
            continue
        pagelink = urllib.parse.urljoin(BLOG_HOME, name)
        fullpath = os.path.join(BUILDDIR, name)
        # try to extract the updated time from the page footer
        updated = None
        with open(fullpath, encoding="utf-8") as htmlobj:
            soup = bs4.BeautifulSoup(htmlobj.read(), "lxml")
        footer = soup.footer
        if footer is not None:
            updated_tag = footer.find(attrs={"class": "updated"})
            if updated_tag is not None:
                updated = dateutil.parser.parse(updated_tag.text)
        sitemap.append(make_sitemap_url_element(pagelink, updated, "monthly", 0.9))
    # blog entries
    for entry in feed.entries:
        sitemap.append(make_sitemap_url_element(entry.link, entry.updated, "monthly", 0.9))
    sitemappath = os.path.join(BUILDDIR, "sitemap.xml")
    with open(sitemappath, "w", encoding="utf-8") as sitemapfile:
        sitemapfile.write('\n%s\n' %
                          ET.tostring(sitemap).decode('utf-8'))
    sys.stderr.write("wrote sitemap.xml\n")
def abosolutify_links(soup, baseurl):
    """Rewrite relative href/src attributes in an article as absolute URLs.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
    baseurl : str
        Base URL that relative links are resolved against.
    """
    # NOTE: the (misspelled) function name is kept for compatibility
    # with existing callers.
    for attribute in ("href", "src"):
        def has_attribute(tag, attribute=attribute):
            """True if the tag carries the attribute being absolutified."""
            return tag.has_attr(attribute)
        for tag in soup.find_all(has_attribute):
            tag[attribute] = urllib.parse.urljoin(baseurl, tag[attribute])
def generate_index_and_feed():
    """Generate index.html and feeds (atom and rss).

    Walks build/blog for generated post HTML files, builds an AtomEntry
    and an RssItem for each, then writes atom.xml and rss.xml, and
    delegates index.html and sitemap.xml to generate_index and
    generate_sitemap respectively.
    """
    # pylint: disable=too-many-statements,attribute-defined-outside-init,invalid-name
    sys.stderr.write("generating atom and rss feeds\n")
    # initialize atom feed metadata
    # NOTE(review): several markup literals below look as if their
    # angle-bracket tags were stripped in transit (e.g. the atom:author
    # markup passed to ET.fromstring); confirm against the original.
    feed = AtomFeed()
    feed.author = ET.fromstring(
        ""
        "{author}"
        "{home}"
        "{email}"
        "".format(author=AUTHOR, home=BLOG_HOME, email=AUTHOR_EMAIL))
    feed.generator = ET.Element("generator", uri=GENERATOR_HOME_PAGE)
    feed.generator.text = GENERATOR_NAME
    if ATOM_ICON_PATH is not None:
        feed.icon = ET.Element("icon")
        feed.icon.text = urllib.parse.urljoin(BLOG_HOME, ATOM_ICON_PATH)
    feed.id_text = BLOG_HOME
    feed.id = ET.Element("id")
    feed.id.text = feed.id_text
    # self link (atom.xml) plus alternate link (the HTML home page)
    feed.links = [
        ET.Element("link", href=urllib.parse.urljoin(BLOG_HOME, "atom.xml"), rel="self",
                   type="application/atom+xml"),
        ET.Element("link", href=BLOG_HOME, rel="alternate",
                   type="text/html"),
    ]
    feed.title_text = BLOG_TITLE
    feed.title = ET.fromstring("{title}".format(title=BLOG_TITLE))
    feed.subtitle_text = BLOG_DESCRIPTION
    feed.subtitle = ET.fromstring("{subtitle}"
                                  .format(subtitle=BLOG_DESCRIPTION))
    # initialize rss feed metadata
    rss = RssFeed()
    rss.rssurl = urllib.parse.urljoin(BLOG_HOME, "rss.xml")
    rss.title = ET.Element("title")
    rss.title.text = BLOG_TITLE
    rss.link = ET.Element("link")
    rss.link.text = BLOG_HOME
    rss.description = ET.Element("description")
    rss.description.text = BLOG_DESCRIPTION
    rss.language = ET.Element("language")
    rss.language.text = LANGUAGE
    # RSS author convention: "email (name)"
    rss.author_text = "{email} ({name})".format(email=AUTHOR_EMAIL, name=AUTHOR)
    rss.managingEditor = ET.Element("managingEditor")
    rss.managingEditor.text = rss.author_text
    rss.webMaster = ET.Element("webMaster")
    rss.webMaster.text = rss.author_text
    rss.generator = ET.Element("generator")
    rss.generator.text = "{generator} ({url})".format(generator=GENERATOR_NAME,
                                                      url=GENERATOR_HOME_PAGE)
    rss.image = ET.Element("image")
    if RSS_ICON_PATH is not None:
        ET.SubElement(rss.image, "url").text = urllib.parse.urljoin(BLOG_HOME, RSS_ICON_PATH)
        rss.image.append(copy.deepcopy(rss.title))
        rss.image.append(copy.deepcopy(rss.link))
        ET.SubElement(rss.image, "width").text = str(RSS_ICON_WIDTH)
        ET.SubElement(rss.image, "height").text = str(RSS_ICON_HEIGHT)
    # update times will be set after everything finishes
    for name in os.listdir(os.path.join(BUILDDIR, "blog")):
        # only files named like YYYY-MM-DD-*.html are posts
        if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
            htmlpath = os.path.join(BUILDDIR, "blog", name)
            entry = AtomEntry()
            item = RssItem()
            try:
                with open(htmlpath, encoding="utf-8") as htmlfile:
                    soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
                # generate atom entry
                entry.author = copy.deepcopy(feed.author)  # assume it's always the same author
                entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
                entry.id_text = entry_url
                entry.id = ET.Element("id")
                entry.id.text = entry_url
                entry.relpath = "/blog/%s" % name
                entry.link = ET.Element("link", href=entry_url)
                entry.title_text = soup.title.text
                entry.title = ET.Element("title", type="html")
                entry.title.text = entry.title_text
                # the post date lives in a <meta name="date"> tag
                post_date = soup.find("meta", attrs={"name": "date"})["content"]
                entry.updated_datetime = dateutil.parser.parse(post_date)
                entry.updated = ET.Element("updated")
                # pylint: disable=no-member
                entry.updated.text = entry.updated_datetime.isoformat()
                # process content
                tags_to_remove = []
                # mark header and footer for removal
                article = soup.article
                if article.header is not None:
                    tags_to_remove.append(article.header)
                # mark line numbers for removal
                for line_number_span in article.find_all("span",
                                                         attrs={"class": "line-number"}):
                    tags_to_remove.append(line_number_span)
                # mark script tags for removal
                for script_tag in article.find_all("script"):
                    tags_to_remove.append(script_tag)
                # make internal links absolute
                abosolutify_links(article, entry_url)
                # remove marked tags (extract detaches them from the tree)
                for tag in tags_to_remove:
                    tag.extract()
                entry.content_html = ''.join([str(content)
                                              for content in article.contents])
                entry.content = ET.Element("content", type="html")
                entry.content.text = ET.CDATA(entry.content_html)
                entry.assemble_entry()
                feed.entries.append(entry)
                # generate rss item from the same data
                item.title = ET.Element("title")
                item.title.text = entry.title_text
                item.link = ET.Element("link")
                item.link.text = entry_url
                item.description = ET.Element("description")
                item.description.text = ET.CDATA(entry.content_html)
                item.author = ET.Element("author")
                item.author.text = rss.author_text
                item.guid = ET.Element("guid", isPermaLink="true")
                item.guid.text = item.link.text
                item.timestamp = entry.updated_datetime.timestamp()
                item.pubDate = ET.Element("pubDate")
                item.pubDate.text = email.utils.formatdate(item.timestamp, usegmt=True)
                item.assemble_item()
                rss.items.append(item)
            except Exception:
                # dump the offending HTML for debugging, then re-raise
                sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
                with open(htmlpath, encoding="utf-8") as htmlfile:
                    sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
                raise
    # sort entries by reverse chronological order
    feed.entries.sort(key=lambda entry: entry.updated_datetime, reverse=True)
    rss.items.sort(key=lambda item: item.timestamp, reverse=True)
    generate_index(feed)
    feed.updated_datetime = current_datetime()
    feed.updated = ET.Element("updated")
    feed.updated.text = feed.updated_datetime.isoformat()
    rss.update_timestamp = time.time()
    rss.pubDate = ET.Element("pubDate")
    rss.pubDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)
    rss.lastBuildDate = ET.Element("lastBuildDate")
    rss.lastBuildDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)
    with open(ATOM, "w", encoding="utf-8") as atom:
        atom.write("%s\n" % feed.dump_feed())
    sys.stderr.write("wrote atom.xml\n")
    with open(RSS, "w", encoding="utf-8") as rssxml:
        rssxml.write("%s\n" % rss.dump_rss())
    sys.stderr.write("wrote rss.xml\n")
    generate_sitemap(feed)
def _pre_tag_insert_line_numbers(soup, pre_tag):
    """Insert a line-number span for every line of code in a pre tag.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
        The document the pre tag belongs to (needed to create new tags).
    pre_tag : bs4.element.Tag
    """
    total_lines = len(pre_tag.text.split("\n"))
    for line_number in range(1, total_lines + 1):
        # each line number is an absolutely positioned span whose top
        # offset places it next to its line (1.35em per line)
        span = soup.new_tag("span")
        span["class"] = "line-number"
        span["data-line"] = line_number
        span["style"] = "top: %.2fem" % ((line_number - 1) * 1.35)
        # an embedded comment suppresses tidy5's empty-tag warning
        span.append(soup.new_string("", bs4.Comment))
        pre_tag.code.append(span)
def number_code_lines(soup):
    """Insert line numbers into all preformatted source-code blocks.

    Only pre tags carrying a code child and the sourceCode class are
    treated as source code; all other pre tags are left alone.
    """
    for pre_tag in soup.find_all("pre"):
        is_source_code = (pre_tag.code is not None and
                          "class" in pre_tag.attrs and
                          "sourceCode" in pre_tag["class"])
        if is_source_code:
            _pre_tag_insert_line_numbers(soup, pre_tag)
def link_img_tags(soup):
    """Wrap each img tag in the article in a link to its original.

    The link opens in a new tab (target="_blank"). Does nothing when the
    document has no article element.
    """
    article = soup.article
    if not article:
        return
    for img_tag in article.find_all("img"):
        anchor = soup.new_tag("a", href=img_tag["src"], target="_blank")
        anchor.insert(0, copy.copy(img_tag))
        img_tag.replace_with(anchor)
def process_footnote_backlinks(soup):
    """Add class attribute "footnotes-backlink" to each footnote backlink."""
    def is_backlink(tag):
        """True for an anchor pointing at #fnref... whose text is U+21A9."""
        return (tag.name == "a" and
                tag.has_attr("href") and
                tag["href"].startswith("#fnref") and
                tag.string == "\u21A9")  # U+21A9: LEFTWARDS ARROW WITH HOOK

    for footnotes in soup.find_all("div", attrs={"class": "footnotes"}):
        for fn_a_tag in footnotes.find_all(is_backlink):
            fn_a_tag["class"] = "footnotes-backlink"
            # append the variation selector to force text presentation
            fn_a_tag.string = "\u21A9\uFE0E"  # U+FE0E: VARIATION SELECTOR-15
def postprocess_html_file(htmlfilepath):
    """Run every HTML postprocessor on a file, rewriting it in place.

    Parameters
    ----------
    htmlfilepath : str
        Path of the HTML file to postprocess.
    """
    with open(htmlfilepath, "r+", encoding="utf-8") as htmlfileobj:
        soup = bs4.BeautifulSoup(htmlfileobj.read(), "lxml")
        # the postprocessing pipeline (extensible)
        for postprocessor in (number_code_lines,
                              link_img_tags,
                              process_footnote_backlinks):
            postprocessor(soup)
        # write the modified document back over the original
        htmlfileobj.seek(0)
        htmlfileobj.write(str(soup))
        htmlfileobj.truncate()
def generate_blog(fresh=False, report_total_errors=True):
    """Generate the blog in BUILDDIR.

    Parameters
    ----------
    fresh : bool
        If True, remove all existing build artifacts and start afresh;
        otherwise, only copy or build new or modified files. Default is
        False.
    report_total_errors : bool
        If True, a line will be printed to stderr at the end of build
        (assuming the function doesn't raise early) reporting the total
        number of errors, e.g., "build finished with 0 errors". This is
        turned on by default, but pass False to turn it off, which will
        result in a completely silent session if nothing changed. This
        is useful for auto-regen, for instance.

    Returns
    -------
    failed_builds : int
        Number of build failures.

    Raises
    ------
    OSError
        If SOURCEDIR or HTMLTEMPLATE does not exist.
    """
    # pylint: disable=too-many-branches,too-many-locals,too-many-statements
    if not os.path.isdir(SOURCEDIR):
        raise OSError("source directory %s does not exist" % SOURCEDIR)
    if not os.path.exists(HTMLTEMPLATE):
        raise OSError("HTML template %s not found" % HTMLTEMPLATE)
    if not os.path.isdir(BUILDDIR):
        if os.path.exists(BUILDDIR):
            os.remove(BUILDDIR)
        os.mkdir(BUILDDIR, mode=0o755)
    if fresh:
        # wipe everything except the deployment git repository
        for name in os.listdir(BUILDDIR):
            if name == ".git":
                continue
            obj = os.path.join(BUILDDIR, name)
            if os.path.isdir(obj):
                shutil.rmtree(obj)
            else:
                os.remove(obj)
    # nojekyll: https://help.github.com/articles/files-that-start-with-an-underscore-are-missing/
    if not os.path.exists(os.path.join(BUILDDIR, ".nojekyll")):
        with open(os.path.join(BUILDDIR, ".nojekyll"), "w"):
            pass
    failed_builds = 0
    # a source file is stale if it is older than the generator or the
    # template, whichever is newer
    generator_mtime = os.path.getmtime(GENERATORSOURCE)
    template_mtime = os.path.getmtime(HTMLTEMPLATE)
    fundamental_mtime = max(generator_mtime, template_mtime)
    anything_modified = False
    for root, _, files in os.walk(SOURCEDIR):
        relroot = os.path.relpath(root, start=SOURCEDIR)
        dstroot = os.path.join(BUILDDIR, relroot)
        if not os.path.isdir(dstroot):
            if os.path.exists(dstroot):
                os.remove(dstroot)
            os.mkdir(dstroot, mode=0o755)
        for name in files:
            if name.startswith('.'):
                continue
            extension = name.split(".")[-1]
            if extension not in ["css", "jpg", "md", "png", "svg", "ico", "txt"]:
                continue
            relpath = os.path.join(relroot, name)
            srcpath = os.path.join(root, name)
            if extension == "md":
                dstpath = os.path.join(dstroot, re.sub(r'\.md$', '.html', name))
            else:
                dstpath = os.path.join(dstroot, name)
            if ((not os.path.exists(dstpath) or
                 os.path.getmtime(dstpath) <=
                 max(fundamental_mtime, os.path.getmtime(srcpath)))):
                # new post or modified post
                anything_modified = True
                if srcpath == INDEXMD:
                    continue  # index will be processed separately
                if extension in ["css", "jpg", "png", "svg", "ico", "txt"]:
                    sys.stderr.write("copying %s\n" % relpath)
                    shutil.copy(srcpath, dstpath)
                elif extension == "md":
                    sys.stderr.write("compiling %s\n" % relpath)
                    pandoc_args = [
                        "pandoc", srcpath,
                        "--template", HTMLTEMPLATE,
                        "--highlight-style=pygments",
                        "-o", dstpath,
                    ]
                    try:
                        subprocess.check_call(pandoc_args)
                    except subprocess.CalledProcessError:
                        failed_builds += 1
                        # BUGFIX: the message used to lack its newline
                        sys.stderr.write("error: failed to generate %s\n" %
                                         relpath)
                        # BUGFIX: do not postprocess a file that pandoc
                        # failed to (re)generate
                        continue
                    # postprocess generated HTML file
                    postprocess_html_file(dstpath)
    if anything_modified:
        generate_index_and_feed()
        sys.stderr.write("done\n")
    if report_total_errors:
        sys.stderr.write("build finished with %d errors\n" % failed_builds)
    return failed_builds
def generate(args):
    """CLI wrapper around generate_blog(fresh=False).

    Exits the process with generate_blog's failure count as status.
    """
    # pylint: disable=unused-argument
    # sys.exit is the programmatic exit; the exit() builtin is meant for
    # interactive sessions
    sys.exit(generate_blog(fresh=False))
def regenerate(args):
    """CLI wrapper around generate_blog(fresh=True).

    Exits the process with generate_blog's failure count as status.
    """
    # pylint: disable=unused-argument
    # sys.exit is the programmatic exit; the exit() builtin is meant for
    # interactive sessions
    sys.exit(generate_blog(fresh=True))
def sanitize(string):
    """Sanitize a string (e.g., a post title) for URI consumption.

    Lowercases, strips characters other than word characters, hyphens
    and whitespace, collapses whitespace runs into single hyphens, and
    percent-encodes the result.

    Parameters
    ----------
    string : str or bytes
        Bytes input is decoded as UTF-8 first.

    Returns
    -------
    str
    """
    text = string.decode('utf-8') if isinstance(string, bytes) else string
    text = text.lower()
    # drop all non-word, non-hyphen and non-whitespace characters
    text = re.sub(r"[^\w\s-]", "", text)
    # replace consecutive whitespaces with a single hyphen
    text = re.sub(r"\s+", "-", text)
    # percent encode the result
    return urllib.parse.quote(text)
def new_post(title):
    """Create a new post with metadata pre-filled.

    The path to the new post is printed to stdout.

    Parameters
    ----------
    title : str
        Title of the new post.

    Returns
    -------
    0
        On success.
    """
    date = current_datetime()
    iso_date = date.isoformat()
    display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
    filename = "%s-%s.md" % (date.strftime("%Y-%m-%d"), sanitize(title))
    fullpath = os.path.join(POSTSDIR, filename)
    # ensure POSTSDIR exists and really is a directory
    if not os.path.isdir(POSTSDIR):
        if os.path.exists(POSTSDIR):
            os.remove(POSTSDIR)
        os.mkdir(POSTSDIR, mode=0o755)
    # YAML metadata block consumed by pandoc
    metadata = ("---\n"
                'title: "%s"\n'
                "date: %s\n"
                "date-display: %s\n"
                "---\n" % (title, iso_date, display_date))
    with open(fullpath, 'w', encoding='utf-8') as newpost:
        newpost.write(metadata)
    sys.stderr.write("New post created in:\n")
    print(fullpath)
    return 0
def new_post_cli(args):
    """CLI wrapper around new_post.

    Parameters
    ----------
    args : argparse.Namespace
        Carries the post title in args.title.
    """
    new_post(args.title)
def touch(filename):
    """Update the timestamp of a post to the current time.

    Rewrites the date and date-display metadata fields in place, prints
    the changeset to stderr, and renames the file if its date prefix no
    longer matches the new date.

    Parameters
    ----------
    filename : str
        Path or basename of the post source file; only the basename is
        used, and the file is looked up in POSTSDIR.

    Returns
    -------
    int
        0 on success; 1 if the post is missing or not named like a post.
    """
    filename = os.path.basename(filename)
    fullpath = os.path.join(POSTSDIR, filename)
    if not os.path.exists(fullpath):
        sys.stderr.write("%serror: post %s not found %s\n" %
                         (RED, fullpath, RESET))
        return 1
    # valid posts are named YYYY-MM-DD-...
    filename_prefix_re = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}")
    if not filename_prefix_re.match(filename):
        sys.stderr.write(RED)
        sys.stderr.write("error: post %s is not a valid post\n" % filename)
        sys.stderr.write("error: the filename of a valid post begins with "
                         "a date in the form xxxx-xx-xx\n")
        sys.stderr.write(RESET)
        return 1
    # update timestamp in the metadata section of the post
    whatchanged = io.StringIO()
    date = current_datetime()
    iso_date = date.isoformat()
    display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
    filename_date = date.strftime("%Y-%m-%d")
    # fileinput with inplace=True redirects stdout into the file being
    # edited, so each sys.stdout.write below emits a line of the new file
    with fileinput.input(files=(fullpath), inplace=True) as lines:
        meta_fences = 0
        for line in lines:
            if line.startswith("---"):
                # count the --- fences delimiting the metadata block
                meta_fences += 1
                sys.stdout.write(line)
                continue
            if meta_fences >= 2:
                # already went past the metadata section
                sys.stdout.write(line)
                continue
            if line.startswith("date: "):
                updated_line = "date: %s\n" % iso_date
                sys.stdout.write(updated_line)
                whatchanged.write("-%s+%s\n" % (line, updated_line))
                continue
            if line.startswith("date-display: "):
                updated_line = "date-display: %s\n" % display_date
                sys.stdout.write(updated_line)
                whatchanged.write("-%s+%s\n" % (line, updated_line))
                continue
            sys.stdout.write(line)
    sys.stderr.write("\n%schangeset:%s\n\n%s" %
                     (YELLOW, RESET, whatchanged.getvalue()))
    whatchanged.close()
    # check if the file needs to be renamed (the date prefix changed)
    new_filename = filename_prefix_re.sub(filename_date, filename)
    if new_filename != filename:
        new_fullpath = os.path.join(POSTSDIR, new_filename)
        os.rename(fullpath, new_fullpath)
        sys.stderr.write("%srenamed to %s%s\n" % (YELLOW, new_filename, RESET))
    return 0
def touch_cli(args):
    """CLI wrapper around touch.

    Parameters
    ----------
    args : argparse.Namespace
        Carries the post filename in args.filename.
    """
    touch(args.filename)
def deploy(args):
    """Deploys build directory to origin/master without regenerating.

    Prompts for confirmation when the project root is dirty, assembles a
    commit message from atom.xml's updated time and the latest commit on
    the source branch, then commits and pushes the build directory.

    Parameters
    ----------
    args
        Unused; present for the argparse dispatch interface.

    Returns
    -------
    0
        On success. Exit early with nonzero status otherwise.
    """
    # pylint: disable=unused-argument,too-many-statements
    # check whether root is dirty
    os.chdir(ROOTDIR)
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(YELLOW)
        sys.stderr.write("Project root is dirty.\n")
        sys.stderr.write("You may want to commit in your changes "
                         "to the source branch, since the SHA and title "
                         "of the latest commit on the source branch will be "
                         "incorporated into the commit message on "
                         "the deployment branch.\n")
        sys.stderr.write(RESET)
        # keep asking until we get an answer; an empty answer means "no"
        while True:
            sys.stderr.write("Continue? [yN] ")
            answer = input()
            if not answer:
                # default
                abort = True
                break
            elif answer.startswith(('y', 'Y')):
                abort = False
                break
            elif answer.startswith(('n', 'N')):
                abort = True
                break
            else:
                sys.stderr.write("Please answer yes or no.\n")
        if abort:
            sys.stderr.write("%saborting deployment%s\n" % (RED, RESET))
            return 1
    # extract latest commit on the source branch
    source_commit = subprocess.check_output(
        ["git", "log", "-1", "--pretty=oneline", "source", "--"]).decode('utf-8').strip()
    # cd into BUILDDIR and assemble commit message
    sys.stderr.write("%scommand: cd '%s'%s\n" % (BLUE, BUILDDIR, RESET))
    os.chdir(BUILDDIR)
    # extract updated time from atom.xml
    if not os.path.exists("atom.xml"):
        sys.stderr.write("atom.xml not found, cannot deploy\naborting\n")
        return 1
    atomxml = ET.parse("atom.xml").getroot()
    updated = atomxml.find('{http://www.w3.org/2005/Atom}updated').text
    commit_message = ("Site updated at %s\n\nsource branch was at:\n%s\n" %
                      (updated, source_commit))
    # commit changes in BUILDDIR
    sys.stderr.write("%scommand: git add --all%s\n" % (BLUE, RESET))
    subprocess.check_call(["git", "add", "--all"])
    sys.stderr.write("%scommand: git commit --gpg-sign --message='%s'%s\n" %
                     (BLUE, commit_message, RESET))
    try:
        subprocess.check_call(["git", "commit", "--gpg-sign",
                               "--message=%s" % commit_message])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git commit failed%s\n" % (RED, RESET))
        return 1
    # check dirty status: a clean tree confirms everything was committed
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(RED)
        sys.stderr.write("error: failed to commit all changes; "
                         "build directory still dirty\n")
        sys.stderr.write("error: please manually inspect what was left out\n")
        sys.stderr.write(RESET)
        return 1
    # push to origin/master
    sys.stderr.write("%scommand: git push origin master%s\n" % (BLUE, RESET))
    try:
        subprocess.check_call(["git", "push", "origin", "master"])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git push failed%s\n" % (RED, RESET))
        return 1
    return 0
def gen_deploy(args):
    """Regenerate the blog afresh and deploy it.

    Before regenerating, tries to smartly determine the latest post and
    prompts for touching its timestamp (see touch).

    Parameters
    ----------
    args
        Unused; present for the argparse dispatch interface.

    Returns
    -------
    int or None
        1 if the user quits at the touch prompt; otherwise the result of
        falling through after deployment.
    """
    # pylint: disable=unused-argument,too-many-branches
    # try to smartly determine the latest post, and prompt to touch it
    current_time = time.time()
    latest_post = None
    latest_postdate = 0
    latest_mtime = 0
    for name in os.listdir(POSTSDIR):
        matchobj = re.match(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})-.*\.md", name)
        if not matchobj:
            continue
        fullpath = os.path.join(POSTSDIR, name)
        mtime = os.path.getmtime(fullpath)
        # get post date from the date metadata field of the post
        postdate = 0
        with open(fullpath) as postobj:
            for line in postobj:
                dateregex = r"^date: (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}-\d{2}:?\d{2})"
                datematch = re.match(dateregex, line.rstrip())
                if datematch:
                    postdate = dateutil.parser.parse(datematch.group(1)).timestamp()
                    break
        # skip the post if it is dated more than three days ago
        if current_time - postdate > 3 * 24 * 3600:
            continue
        if mtime > latest_mtime:
            latest_post = name
            latest_postdate = postdate
            latest_mtime = mtime
    # prompt for touching if the latest post determined above was
    # modified within the last hour but the date registered in the post
    # isn't within the last ten minutes
    if ((latest_post is not None and current_time - latest_mtime < 3600 and
         current_time - latest_postdate > 600)):
        sys.stderr.write("%sIt appears that %s might be a new post.\n"
                         "Do you want to touch its timestamp?%s\n" %
                         (GREEN, latest_post, RESET))
        while True:
            yesnoquit = input("[ynq]: ")
            if yesnoquit.startswith(("Y", "y")):
                yesno = True
                break
            elif yesnoquit.startswith(("N", "n")):
                yesno = False
                break
            elif yesnoquit.startswith(("Q", "q")):
                sys.stderr.write("%saborting gen_deploy%s\n" % (RED, RESET))
                return 1
            else:
                sys.stderr.write("Please answer yes, no, or quit.\n")
        if yesno:
            sys.stderr.write("%stouching %s%s\n" % (BLUE, latest_post, RESET))
            touch(latest_post)
            sys.stderr.write("\n")
    generate_blog(fresh=True)
    deploy(None)
class HTTPServerProcess(multiprocessing.Process):
    """A child process that serves a directory over HTTP."""

    def __init__(self, rootdir):
        """Initialize the HTTPServerProcess class.

        Parameters
        ----------
        rootdir : str
            The root directory to serve from.
        """
        super().__init__()
        self.rootdir = rootdir

    def run(self):
        """Create an HTTP server and serve forever.

        Runs on localhost. The default port is 8000; if it is not
        available, a random port is used instead.
        """
        os.chdir(self.rootdir)
        # pylint: disable=invalid-name
        HandlerClass = http.server.SimpleHTTPRequestHandler
        try:
            server = http.server.HTTPServer(("", 8000), HandlerClass)
        except OSError:
            # port 8000 taken -- let the OS pick a free one
            server = http.server.HTTPServer(("", 0), HandlerClass)
        _, portnumber = server.socket.getsockname()
        sys.stderr.write("server serving on http://localhost:%d\n" % portnumber)
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            server.shutdown()
def preview(args):
    """Serve the blog and auto regenerate upon changes.

    Starts an HTTP server child process rooted at BUILDDIR, then polls
    for source modifications via generate_blog until SIGINT arrives.

    Parameters
    ----------
    args
        Unused; present for the argparse dispatch interface.

    Returns
    -------
    0
        After SIGINT is received and the server process is joined.
    """
    # pylint: disable=unused-argument
    server_process = HTTPServerProcess(BUILDDIR)
    server_process.start()
    sys.stderr.write("watching for changes\n")
    sys.stderr.write("send SIGINT to stop\n")
    # install a SIGINT handler only for this process
    sigint_raised = False

    def sigint_mitigator(signum, frame):
        """Translate SIGINT to setting the sigint_raised flag."""
        nonlocal sigint_raised
        sigint_raised = True

    signal.signal(signal.SIGINT, sigint_mitigator)
    # Watch and auto-regen.
    # No need to actually implement watch separately, since
    # generate_blog(fresh=False, report_total_errors=False) already
    # watches for modifications and only regens upon changes, and it is
    # completely silent when there's no change.
    while not sigint_raised:
        generate_blog(fresh=False, report_total_errors=False)
        time.sleep(0.5)
    sys.stderr.write("\nSIGINT received, cleaning up...\n")
    server_process.join()
    return 0
def main():
    """CLI interface: parse arguments and dispatch to the chosen action."""
    description = "Simple blog generator in Python with Pandoc as backend."
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers(dest="action")
    subparsers.required = True
    # one distinctly named parser variable per subcommand
    parser_new_post = subparsers.add_parser(
        "new_post", aliases=["n", "new"],
        description="Create a new post with metadata pre-filled.")
    parser_new_post.add_argument("title", help="title of the new post")
    parser_new_post.set_defaults(func=new_post_cli)
    parser_touch = subparsers.add_parser(
        "touch", aliases=["t", "tou"],
        description="""Touch an existing post, i.e., update its
        timestamp to current time. Why is this ever useful? Well, the
        timestamp filled in by new_post is the time of creation, but one
        might spend several hours after the creation of the file to
        finish the post. Sometimes the post is even created on one day
        and finished on another (say created at 11pm and finished at
        1am). Therefore, one may want to retouch the timestamp before
        publishing.""")
    parser_touch.add_argument("filename",
                              help="path or basename of the source file, "
                              "e.g., 2015-05-05-new-blog-new-start.md")
    parser_touch.set_defaults(func=touch_cli)
    parser_generate = subparsers.add_parser(
        "generate", aliases=["g", "gen"],
        description="Generate new or changed objects.")
    parser_generate.set_defaults(func=generate)
    parser_regenerate = subparsers.add_parser(
        "regenerate", aliases=["r", "regen"],
        description="Regenerate the entire blog afresh.")
    parser_regenerate.set_defaults(func=regenerate)
    parser_preview = subparsers.add_parser(
        "preview", aliases=["p", "pre"],
        description="Serve the blog locally and auto regenerate upon changes.")
    parser_preview.set_defaults(func=preview)
    parser_deploy = subparsers.add_parser(
        "deploy", aliases=["d", "dep"],
        description="Deploy build/ to origin/master without regenerating.")
    parser_deploy.set_defaults(func=deploy)
    parser_gen_deploy = subparsers.add_parser(
        "gen_deploy", aliases=["gd", "gendep"],
        description="Rebuild entire blog and deploy build/ to origin/master.")
    parser_gen_deploy.set_defaults(func=gen_deploy)
    # colors are only meaningful while the action runs
    with init_colorama():
        args = parser.parse_args()
        returncode = args.func(args)
        exit(returncode)
# Script entry point.
if __name__ == '__main__':
    main()