#!/usr/bin/env python3

"""A simple blog generator with Pandoc as backend."""

# TODO: put blog configurations in a config file
# TODO: auto retouch: prompt for git commit amend after touching
# (display commit message to avoid amending the wrong commit)

# pylint: disable=too-many-lines

import argparse
from contextlib import contextmanager
import copy
import curses
import datetime
import email.utils
import fileinput
import io
import http.client
import http.server
import multiprocessing
import os
import re
import shutil
import signal
import string
import subprocess
import sys
import tempfile
import time
import urllib.parse

import blessed
import bs4
import colorama
import dateutil.parser
import dateutil.tz
import lxml.etree as ET

############################# BLOG CONFIGURATIONS ##############################
# Safe to customize
BLOG_HOME = "http://zmwangx.github.io/"
BLOG_TITLE = "dl? cmplnts?"
BLOG_DESCRIPTION = "Zhiming Wang's personal blog"
LANGUAGE = "en-us"  # used as the RSS <language> value
AUTHOR = "Zhiming Wang"
AUTHOR_EMAIL = "zmwangx@gmail.com"
ATOM_ICON_PATH = "img/icon-400.png"  # set to None to leave it out
RSS_ICON_PATH = "img/icon-100.png"  # set to None to leave it out
RSS_ICON_WIDTH = 100
RSS_ICON_HEIGHT = 100
########################## END OF BLOG CONFIGURATIONS ##########################


########################### GENERATOR CONFIGURATIONS ###########################
# Do not touch unless you know what you are doing.
GENERATOR_NAME = "pyblog"
GENERATOR_HOME_PAGE = "https://github.com/zmwangx/zmwangx.github.io"

# Directory layout: everything is resolved relative to this script's location.
ROOTDIR = os.path.dirname(os.path.realpath(__file__))
SOURCEDIR = os.path.join(ROOTDIR, "source")
POSTSDIR = os.path.join(SOURCEDIR, "blog")  # Markdown sources of blog posts
INDEXMD = os.path.join(SOURCEDIR, "index.md")
GENERATORSOURCE = os.path.join(ROOTDIR, "pyblog")  # this generator script itself
HTMLTEMPLATE = os.path.join(SOURCEDIR, "template.html")
BUILDDIR = os.path.join(ROOTDIR, "build")
ATOM = os.path.join(BUILDDIR, "atom.xml")
RSS = os.path.join(BUILDDIR, "rss.xml")
INDEXHTML = os.path.join(BUILDDIR, "index.html")
EXCLUDELIST = os.path.join(SOURCEDIR, ".exclude")  # sources excluded from the build

FEED_MAX_ENTRIES = 20  # cap on the number of entries included in the atom/rss feeds
CODE_LINE_HEIGHT = 18  # NOTE(review): not referenced in this chunk; presumably the pixel height of one code line — confirm
####################### END OF GENERATOR CONFIGURATIONS ########################


# declare the global foreground ANSI codes; these stay empty strings until
# init_colorama() populates them, so color-formatted messages degrade
# gracefully to plain text outside the init_colorama() context
BLACK = ""
BLUE = ""
CYAN = ""
GREEN = ""
MAGENTA = ""
RED = ""
WHITE = ""
YELLOW = ""
RESET = ""

@contextmanager
def init_colorama():
    """Set global foreground modifying ANSI codes.

    BLACK, BLUE, CYAN, GREEN, MAGENTA, RED, WHITE, YELLOW, and RESET.

    On exit the codes are reset to empty strings and colorama is
    deinitialized, even if the managed block raises.

    """

    # pylint: disable=invalid-name

    colorama.init()
    # Assign the ANSI escape codes to the module-level color names through
    # globals() instead of exec() -- same effect, no dynamic code execution.
    for color, ansi in colorama.Fore.__dict__.items():
        globals()[color] = ansi
    try:
        yield
    finally:
        # Always restore the empty-string defaults so later output is not
        # accidentally colorized, even if the caller raised.
        for color in colorama.Fore.__dict__:
            globals()[color] = ""
        colorama.deinit()


def current_datetime():
    """Return the current datetime, complete with tzinfo.

    Precision is one second. Timezone is the local timezone.
    """
    now_seconds = round(time.time())
    return datetime.datetime.fromtimestamp(now_seconds, dateutil.tz.tzlocal())


class AtomFeed(object):
    """Class for storing atom:feed data and metadata.

    https://tools.ietf.org/html/rfc4287.

    """

    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None  # atom:author
        self.generator = None  # atom:generator, optional
        self.icon = None  # atom:icon, optional
        self.logo = None  # atom:logo, optional
        self.id_text = None  # atom:id, just use URI
        self.id = None  # atom:id
        self.links = []  # list of atom:link
        self.title_text = None  # the text of atom:title
        self.title = None  # atom:title
        self.subtitle_text = None  # the text of atom:subtitle
        self.subtitle = None  # atom:subtitle
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None  # atom:updated
        self.entries = []  # list of atom:entry, in reverse time order
        self.feed = None  # atom:feed, assembled

    def assemble_feed(self):
        """Assemble atom:feed from the individual child elements."""
        # pylint: disable=multiple-statements
        self.feed = ET.Element("feed", xmlns="http://www.w3.org/2005/Atom")
        self.feed.append(self.title)
        if self.subtitle is not None: self.feed.append(self.subtitle)
        for link in self.links:
            self.feed.append(link)
        self.feed.append(self.updated)
        self.feed.append(self.id)
        self.feed.append(self.author)
        if self.icon is not None: self.feed.append(self.icon)
        # BUGFIX: previously appended self.icon a second time here instead of
        # self.logo, so atom:logo never made it into the feed
        if self.logo is not None: self.feed.append(self.logo)
        if self.generator is not None: self.feed.append(self.generator)
        # include at most FEED_MAX_ENTRIES entries in the feed
        for entry in self.entries[:FEED_MAX_ENTRIES]:
            self.feed.append(entry.entry)

    def dump_feed(self):
        """Dump atom:feed XML, assembling the feed first if needed."""
        if self.feed is None:
            self.assemble_feed()
        return ET.tostring(self.feed).decode("utf-8")


class AtomEntry(object):
    """Container for the data and metadata of a single atom:entry."""

    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None  # atom:author
        self.id_text = None  # atom:id, just use URI
        self.id = None  # atom:id
        self.relpath = None  # HTML page path relative to home
        self.link = None  # atom:link
        self.title_text = None  # plain text title
        self.title = None  # atom:title
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None  # atom:updated
        self.content_html = None  # content as HTML markup
        self.content = None  # atom:content
        self.entry = None  # atom:entry, assembled

    def assemble_entry(self):
        """Assemble atom:entry from the individual child elements."""
        self.entry = ET.Element("entry")
        for child in (self.title, self.link, self.updated,
                      self.id, self.author, self.content):
            self.entry.append(child)

    def dump_entry(self):
        """Dump atom:entry XML, assembling the entry first if needed."""
        if self.entry is None:
            self.assemble_entry()
        return ET.tostring(self.entry).decode("utf-8")


class RssFeed(object):
    """Class for storing an RSS 2.0 feed.

    https://validator.w3.org/feed/docs/rss2.html.

    """

    # pylint: disable=too-many-instance-attributes

    REQUIRED_ELEMENTS = ["title", "link", "description"]
    OPTIONAL_ELEMENTS = ["language", "copyright", "managingEditor", "webMaster",
                         "pubDate", "lastBuildDate", "category", "generator",
                         "docs", "cloud", "ttl", "image", "textInput",
                         "skipHours", "skipDays"]

    def __init__(self):
        """Define available attributes."""
        self.rssurl = None  # the URL of the rss feed
        self.atomlink = None
        # start every channel element out as None
        for element in self.REQUIRED_ELEMENTS + self.OPTIONAL_ELEMENTS:
            setattr(self, element, None)
        # <docs> always points at the RSS 2.0 specification
        self.docs = ET.Element("docs")
        self.docs.text = "https://validator.w3.org/feed/docs/rss2.html"
        self.author_text = None
        self.update_timestamp = None
        self.items = []
        self.rss = None
        self.channel = None

    def assemble_rss(self):
        """Assemble the RSS 2.0 feed from the channel elements and items."""
        self.rss = ET.Element("rss", version="2.0",
                              nsmap={"atom": "http://www.w3.org/2005/Atom"})
        self.channel = ET.SubElement(self.rss, "channel")
        # https://validator.w3.org/feed/docs/warning/MissingAtomSelfLink.html
        self.atomlink = ET.SubElement(
            self.channel, "{http://www.w3.org/2005/Atom}link",
            href=self.rssurl, rel="self", type="application/rss+xml")
        for name in self.REQUIRED_ELEMENTS:
            self.channel.append(getattr(self, name))
        for name in self.OPTIONAL_ELEMENTS:
            child = getattr(self, name)
            if child is not None:
                self.channel.append(child)
        # include at most FEED_MAX_ENTRIES items in the RSS feed
        for item in self.items[:FEED_MAX_ENTRIES]:
            self.channel.append(item.item)

    def dump_rss(self):
        """Dump RSS feed XML, assembling the feed first if needed."""
        if self.rss is None:
            self.assemble_rss()
        return ET.tostring(self.rss).decode("utf-8")


class RssItem(object):
    """Container for a single RSS 2.0 item."""

    ELEMENTS = ["title", "link", "description", "author", "category", "comments",
                "enclosure", "guid", "pubDate", "source"]

    def __init__(self):
        """Define available attributes."""
        for element in self.ELEMENTS:
            setattr(self, element, None)
        self.timestamp = None  # publication time as a Unix timestamp
        self.item = None  # the assembled <item> element

    def assemble_item(self):
        """Assemble the <item> element from whichever sub-elements are set."""
        self.item = ET.Element("item")
        for name in self.ELEMENTS:
            child = getattr(self, name)
            if child is None:
                continue
            self.item.append(child)

    def dump_item(self):
        """Dump RSS item XML, assembling the item first if needed."""
        if self.item is None:
            self.assemble_item()
        return ET.tostring(self.item).decode("utf-8")


def generate_index(feed):
    """Generate index.html from index.md and a TOC.

    Parameters
    ----------
    feed : AtomFeed
        The feed whose entries (already in reverse chronological order)
        are used to build the table of contents.

    """

    sys.stderr.write("generating index.html\n")

    # generate TOC
    tocbuff = io.StringIO()
    tocbuff.write('<div class="blog-index" id="toc">')
    year = 10000  # will be larger than the latest year for quite a while
    # recall that entries are in reverse chronological order
    table_opened = False
    for entry in feed.entries:
        date = entry.updated_datetime
        if date.year < year:
            # close the previous table if there is one
            if table_opened:
                tocbuff.write(u'</table>\n')
            # write a new <h2 class="blog-index-year-title"> tag with the smaller year
            year = date.year
            tocbuff.write(u'\n<h2 class="blog-index-year-title" id="{0}">{0}</h2>\n\n'.format(year))
            tocbuff.write(u'<table class="blog-index-yearly-index">\n')
            table_opened = True

        # write a new table row entry in Markdown, in the format:
        #
        #   <tr>
        #     <td class="blog-index-post-date"><time class="date" datetime="2015-05-05T00:06:04-0700">May 5</time></td>
        #     <td class="blog-index-post-title">[Blah blah](/blog/2015-05-04-blah-blah.html)</td>
        #   </tr>
        monthday = date.strftime("%b %d")
        tocbuff.write(u'<tr><td class="blog-index-post-date"><time class="date" datetime="%s">%s</time></td>'
                      '<td class="blog-index-post-title">[%s](%s)</td></tr>\n' %
                      (date.isoformat(), monthday, entry.title_text, entry.relpath))
    if table_opened:
        tocbuff.write(u'</table>\n')
    tocbuff.write('</div>')

    # create tempfile with index.md and the TOC concatenated, and generate index.html from that
    # pylint: disable=invalid-name
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    try:
        with open(tmppath, 'w', encoding='utf-8') as tmpfile:
            if os.path.exists(INDEXMD):
                with open(INDEXMD, 'r', encoding='utf-8') as indexmd:
                    tmpfile.write(u"%s\n\n<hr>\n\n" % indexmd.read())
            tmpfile.write("%s\n" % tocbuff.getvalue())
            tocbuff.close()

        pandoc_args = [
            "pandoc", tmppath,
            "--template", HTMLTEMPLATE,
            "--highlight-style=pygments",
            "-o", INDEXHTML,
        ]
        try:
            subprocess.check_call(pandoc_args)
        except subprocess.CalledProcessError:
            sys.stderr.write("error: failed to generate index.html\n")
    finally:
        # remove the tempfile even if file IO or pandoc raised, so failed
        # builds do not leak temporary files
        os.remove(tmppath)


def make_sitemap_url_element(link, updated=None, changefreq=None, priority=None):
    """Make a sitemap <url> element.

    Parameters
    ----------
    link : str or xml.etree.ElementTree.Element
        If using an xml.etree.ElementTree.Element element, then it shall
        be an atom:link element, e.g., <link href="http://zmwangx.github.io/"/>.
    updated : datetime or xml.etree.ElementTree.Element, optional
        If using an xml.etree.ElementTree.Element element, then it shall
        be an atom:updated element, e.g.,
        <updated>2015-05-05T22:38:42-07:00</updated>.
    changefreq : {"always", "hourly", "daily", "weekly", "monthly", "yearly", "never"}, optional
    priority : {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1}, optional

    """

    url_element = ET.Element("url")

    # <loc>: accept either an atom:link element or a plain URL string
    loc_element = ET.Element("loc")
    if isinstance(link, ET._Element):
        loc_element.text = link.attrib["href"]
    else:
        loc_element.text = link
    url_element.append(loc_element)

    # <lastmod>: accept either an atom:updated element or a datetime
    if updated is not None:
        lastmod_element = ET.Element("lastmod")
        if isinstance(updated, ET._Element):
            lastmod_element.text = updated.text
        else:
            lastmod_element.text = updated.isoformat()
        url_element.append(lastmod_element)

    if changefreq is not None:
        changefreq_element = ET.Element("changefreq")
        changefreq_element.text = changefreq
        url_element.append(changefreq_element)

    if priority is not None:
        priority_element = ET.Element("priority")
        priority_element.text = "%.1f" % priority
        url_element.append(priority_element)

    return url_element


def generate_sitemap(feed):
    """Generate sitemap.xml in BUILDDIR.

    Includes the index page, top-level HTML pages, and every blog entry
    of *feed*.

    """
    sitemap = ET.Element("urlset", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
    # index
    sitemap.append(make_sitemap_url_element(BLOG_HOME, feed.updated, "daily", 1.0))
    # other top level pages
    for name in os.listdir(BUILDDIR):
        # raw string for the pattern: "\." in a plain string literal is a
        # deprecated escape sequence on modern Python
        if (not name.endswith(".html") or name == "index.html" or
                re.match(r"google[a-z0-9]+\.html", name)):  # exclude Google's site ownership verification file
            continue
        link = urllib.parse.urljoin(BLOG_HOME, name)
        fullpath = os.path.join(BUILDDIR, name)
        # try to extract updated time from the page's footer, if present
        updated = None
        with open(fullpath, encoding="utf-8") as htmlobj:
            soup = bs4.BeautifulSoup(htmlobj.read(), "lxml")
            if soup.footer is not None:
                updated_tag = soup.footer.find(attrs={"class": "updated"})
                if updated_tag is not None:
                    updated = dateutil.parser.parse(updated_tag.text)
        sitemap.append(make_sitemap_url_element(link, updated, "monthly", 0.9))

    # blog entries
    for entry in feed.entries:
        sitemap.append(make_sitemap_url_element(entry.link, entry.updated, "monthly", 0.9))
    sitemappath = os.path.join(BUILDDIR, "sitemap.xml")
    with open(sitemappath, "w", encoding="utf-8") as sitemapfile:
        sitemapfile.write('<?xml version="1.0" encoding="UTF-8"?>\n%s\n' %
                          ET.tostring(sitemap).decode('utf-8'))
        sys.stderr.write("wrote sitemap.xml\n")


def absolutify_links(soup, baseurl):
    """Rewrite every href/src attribute in an article as an absolute URL.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
    baseurl : str
        Base URL that relative links are resolved against.

    """
    # first all href attributes, then all src attributes
    for attribute in ("href", "src"):
        matcher = lambda tag, attribute=attribute: tag.has_attr(attribute)
        for tag in soup.find_all(matcher):
            tag[attribute] = urllib.parse.urljoin(baseurl, tag[attribute])


def generate_index_and_feed():
    """Generate index.html and feeds (atom and rss).

    Walks BUILDDIR/blog for generated post pages (YYYY-MM-DD*.html),
    builds an atom:entry and an RSS item from each, then writes
    atom.xml, rss.xml, index.html (via generate_index) and sitemap.xml
    (via generate_sitemap).

    """
    # pylint: disable=too-many-statements,attribute-defined-outside-init,invalid-name
    sys.stderr.write("generating atom and rss feeds\n")
    # initialize atom feed
    feed = AtomFeed()
    feed.author = ET.fromstring(
        "<author>"
        "<name>{author}</name>"
        "<uri>{home}</uri>"
        "<email>{email}</email>"
        "</author>".format(author=AUTHOR, home=BLOG_HOME, email=AUTHOR_EMAIL))
    feed.generator = ET.Element("generator", uri=GENERATOR_HOME_PAGE)
    feed.generator.text = GENERATOR_NAME
    if ATOM_ICON_PATH is not None:
        feed.icon = ET.Element("icon")
        feed.icon.text = urllib.parse.urljoin(BLOG_HOME, ATOM_ICON_PATH)
    feed.id_text = BLOG_HOME
    feed.id = ET.Element("id")
    feed.id.text = feed.id_text
    feed.links = [
        ET.Element("link", href=urllib.parse.urljoin(BLOG_HOME, "atom.xml"), rel="self",
                   type="application/atom+xml"),
        ET.Element("link", href=BLOG_HOME, rel="alternate",
                   type="text/html"),
    ]
    feed.title_text = BLOG_TITLE
    feed.title = ET.fromstring("<title>{title}</title>".format(title=BLOG_TITLE))
    feed.subtitle_text = BLOG_DESCRIPTION
    feed.subtitle = ET.fromstring("<subtitle>{subtitle}</subtitle>"
                                  .format(subtitle=BLOG_DESCRIPTION))
    # initialize rss feed
    rss = RssFeed()
    rss.rssurl = urllib.parse.urljoin(BLOG_HOME, "rss.xml")
    rss.title = ET.Element("title")
    rss.title.text = BLOG_TITLE
    rss.link = ET.Element("link")
    rss.link.text = BLOG_HOME
    rss.description = ET.Element("description")
    rss.description.text = BLOG_DESCRIPTION
    rss.language = ET.Element("language")
    rss.language.text = LANGUAGE
    # RSS author convention: "email (name)"
    rss.author_text = "{email} ({name})".format(email=AUTHOR_EMAIL, name=AUTHOR)
    rss.managingEditor = ET.Element("managingEditor")
    rss.managingEditor.text = rss.author_text
    rss.webMaster = ET.Element("webMaster")
    rss.webMaster.text = rss.author_text
    rss.generator = ET.Element("generator")
    rss.generator.text = "{generator} ({url})".format(generator=GENERATOR_NAME,
                                                      url=GENERATOR_HOME_PAGE)
    rss.image = ET.Element("image")
    if RSS_ICON_PATH is not None:
        ET.SubElement(rss.image, "url").text = urllib.parse.urljoin(BLOG_HOME, RSS_ICON_PATH)
        rss.image.append(copy.deepcopy(rss.title))
        rss.image.append(copy.deepcopy(rss.link))
        ET.SubElement(rss.image, "width").text = str(RSS_ICON_WIDTH)
        ET.SubElement(rss.image, "height").text = str(RSS_ICON_HEIGHT)

    # update times will be set after everything finishes

    # build one atom entry and one rss item per generated post page
    for name in os.listdir(os.path.join(BUILDDIR, "blog")):
        if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
            htmlpath = os.path.join(BUILDDIR, "blog", name)
            entry = AtomEntry()
            item = RssItem()
            try:
                with open(htmlpath, encoding="utf-8") as htmlfile:
                    soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")

                    # generate atom entry
                    entry.author = copy.deepcopy(feed.author)  # assume it's always the same author
                    entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
                    entry.id_text = entry_url
                    entry.id = ET.Element("id")
                    entry.id.text = entry_url
                    entry.relpath = "/blog/%s" % name
                    entry.link = ET.Element("link", href=entry_url)
                    entry.title_text = soup.title.text
                    entry.title = ET.Element("title", type="html")
                    entry.title.text = entry.title_text
                    # the post date comes from the page's <meta name="date"> tag
                    post_date = soup.find("meta", attrs={"name": "date"})["content"]
                    entry.updated_datetime = dateutil.parser.parse(post_date)
                    entry.updated = ET.Element("updated")
                    # pylint: disable=no-member
                    entry.updated.text = entry.updated_datetime.isoformat()

                    # process content
                    tags_to_remove = []
                    # mark header and footer for removal
                    article = soup.article
                    if article.header is not None:
                        tags_to_remove.append(article.header)
                    # mark line numbers for removal
                    for line_number_span in article.find_all("span",
                                                             attrs={"class": "line-number"}):
                        tags_to_remove.append(line_number_span)
                    # mark script tags for removal
                    for script_tag in article.find_all("script"):
                        tags_to_remove.append(script_tag)
                    # make internal links absolute
                    absolutify_links(article, entry_url)
                    # remove marked tags
                    for tag in tags_to_remove:
                        tag.extract()

                    entry.content_html = ''.join([str(content)
                                                  for content in article.contents])
                    entry.content = ET.Element("content", type="html")
                    # CDATA wrapping avoids escaping the HTML payload
                    entry.content.text = ET.CDATA(entry.content_html)
                    entry.assemble_entry()
                    feed.entries.append(entry)

                    # generate rss item
                    item.title = ET.Element("title")
                    item.title.text = entry.title_text
                    item.link = ET.Element("link")
                    item.link.text = entry_url
                    item.description = ET.Element("description")
                    item.description.text = ET.CDATA(entry.content_html)
                    item.author = ET.Element("author")
                    item.author.text = rss.author_text
                    item.guid = ET.Element("guid", isPermaLink="true")
                    item.guid.text = item.link.text
                    item.timestamp = entry.updated_datetime.timestamp()
                    item.pubDate = ET.Element("pubDate")
                    item.pubDate.text = email.utils.formatdate(item.timestamp, usegmt=True)
                    item.assemble_item()
                    rss.items.append(item)
            except Exception:
                # dump the offending HTML for debugging, then re-raise
                sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
                with open(htmlpath, encoding="utf-8") as htmlfile:
                    sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
                raise
    # sort entries by reverse chronological order
    feed.entries.sort(key=lambda entry: entry.updated_datetime, reverse=True)
    rss.items.sort(key=lambda item: item.timestamp, reverse=True)

    generate_index(feed)

    # stamp the feed-level update times now that all entries are in
    feed.updated_datetime = current_datetime()
    feed.updated = ET.Element("updated")
    feed.updated.text = feed.updated_datetime.isoformat()

    rss.update_timestamp = time.time()
    rss.pubDate = ET.Element("pubDate")
    rss.pubDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)
    rss.lastBuildDate = ET.Element("lastBuildDate")
    rss.lastBuildDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)

    with open(ATOM, "w", encoding="utf-8") as atom:
        atom.write("%s\n" % feed.dump_feed())
        sys.stderr.write("wrote atom.xml\n")

    with open(RSS, "w", encoding="utf-8") as rssxml:
        rssxml.write("%s\n" % rss.dump_rss())
        sys.stderr.write("wrote rss.xml\n")

    generate_sitemap(feed)


def _pre_tag_insert_line_numbers(soup, pre_tag):
    """Insert line number spans into a highlighted <pre> tag."""
    line_count = len(pre_tag.text.split("\n"))
    for line_no in range(1, line_count + 1):
        # line number divs will look like:
        # <span class="line-number" data-line="1" style="top: 0em"><!----></span>
        # <span class="line-number" data-line="2" style="top: 1.35em"><!----></span>
        span = soup.new_tag("span")
        span["class"] = "line-number"
        span["data-line"] = line_no
        span["style"] = "top: %.2fem" % (1.35 * (line_no - 1))
        # the embedded comment keeps tidy5 from warning about an
        # empty <span> tag
        span.append(soup.new_string("", bs4.Comment))
        pre_tag.code.append(span)


# MARKDOWN EXTENSION!
#
# See docstring of process_image_sizes for documentation.

# If matched, 1st group is width, 3rd group (optional) is height, and
# 4th group is actual text.
# Example: "|1920x1080| alt" matches with groups
# ("1920", "x1080", "1080", "alt").
IMAGESIZE_EXTRACTOR = re.compile(r'\|(\d+)(x(\d+))?\|\s*(.*)')

def process_image_sizes(soup):
    """Process the image size Markdown extension.

    Allows specifying image size in a Markdown image construct
    ![](). The syntax is:

        ![|width(xheight)?| alt](src)

    where width and height are positive integers (xheight is optional),
    and alt is the regular alt string (either plain or with some
    Markdown formatting). alt string, as usual, is optional.

    Examples:

        ![|1920x1080| Hello, world!](http://example.com/hello.png)
        ![|1920| *Hey!*](http://example.com/hey.png)
        ![|1280x800|](http://example.com/noalt.png)

    """
    if not soup.article:
        return

    # move size specs from alt text into width/height attributes
    for img in soup.article.find_all("img"):
        if not img.has_attr("alt"):
            continue
        match = IMAGESIZE_EXTRACTOR.match(img["alt"])
        if not match:
            continue
        width, _, height, real_alt = match.groups()
        img["width"] = width
        if height:
            img["height"] = height
        img["alt"] = real_alt

    # strip image specs from captions, if any
    for caption in soup.article.select(".figure .caption"):
        if hasattr(caption, "contents") and isinstance(caption.contents[0], str):
            match = IMAGESIZE_EXTRACTOR.match(caption.contents[0])
            if match:
                caption.contents[0].replace_with(match.group(4))

def link_img_tags(soup):
    """Convert each <img> tag in <article> to a link to its original."""
    if not soup.article:
        return
    for img in soup.article.find_all("img"):
        # wrap a copy of the image in an <a> pointing at the image source
        anchor = soup.new_tag("a", href=img["src"], target="_blank")
        anchor.insert(0, copy.copy(img))
        img.replace_with(anchor)


def process_footnote_backlinks(soup):
    """Add class attribute "footnotes-backlink" to each footnote backlink."""
    def is_backlink(tag):
        # a backlink is an <a href="#fnref..."> whose text is U+21A9
        # (LEFTWARDS ARROW WITH HOOK)
        return (tag.name == "a" and
                tag.has_attr("href") and
                tag["href"].startswith("#fnref") and
                tag.string == "\u21A9")

    for footnotes in soup.find_all("div", attrs={"class": "footnotes"}):
        for backlink in footnotes.find_all(is_backlink):
            backlink["class"] = "footnotes-backlink"
            # append U+FE0E VARIATION SELECTOR-15 to request text presentation
            backlink.string = "\u21A9\uFE0E"


def postprocess_html_file(htmlfilepath):
    """Perform a series of postprocessing to an HTML file."""
    with open(htmlfilepath, "r+", encoding="utf-8") as fileobj:
        soup = bs4.BeautifulSoup(fileobj.read(), "lxml")

        # a series of postprocessing (extensible)
        for postprocessor in (process_image_sizes,
                              link_img_tags,
                              process_footnote_backlinks):
            postprocessor(soup)

        # overwrite the file in place with the processed markup
        fileobj.seek(0)
        fileobj.write(str(soup))
        fileobj.truncate()


def static_vars(**kwargs):
    """Decorator attaching the given keyword arguments as attributes on
    the decorated function (poor man's C-style static variables)."""
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate


# exclude_list is only initialized once to avoid constant disk IO
@static_vars(exclude_list=None)
def generate_blog(fresh=False, report_total_errors=True):
    """Generate the blog in BUILDDIR.

    Parameters
    ----------
    fresh : bool
        If True, remove all existing build artifacts and start afresh;
        otherwise, only copy or build new or modified files. Default is
        False.
    report_total_errors : bool
        If True, a line will be printed to stderr at the end of build
        (assuming the function doesn't raise early) reporting the total
        number of errors, e.g., "build finished with 0 errors". This is
        turned on by default, but pass False to turn it off, which will
        result in a completely silent session if nothing changed. This
        is useful for auto-regen, for instance.

    Returns
    -------
    failed_builds : int
        Number of build failures.

    """

    # pylint: disable=too-many-branches,too-many-locals,too-many-statements

    if not os.path.isdir(SOURCEDIR):
        raise OSError("source directory %s does not exist" % SOURCEDIR)
    if not os.path.exists(HTMLTEMPLATE):
        raise OSError("HTML template %s not found" % HTMLTEMPLATE)

    if not os.path.isdir(BUILDDIR):
        # a non-directory file squatting on BUILDDIR is replaced
        if os.path.exists(BUILDDIR):
            os.remove(BUILDDIR)
        os.mkdir(BUILDDIR, mode=0o755)
    if fresh:
        # wipe all build artifacts, but preserve the .git directory
        for name in os.listdir(BUILDDIR):
            if name == ".git":
                continue
            obj = os.path.join(BUILDDIR, name)
            if os.path.isdir(obj):
                shutil.rmtree(obj)
            else:
                os.remove(obj)

    # nojekyll: https://help.github.com/articles/files-that-start-with-an-underscore-are-missing/
    if not os.path.exists(os.path.join(BUILDDIR, ".nojekyll")):
        with open(os.path.join(BUILDDIR, ".nojekyll"), "w") as fileobj:
            pass

    failed_builds = 0
    # a target is considered stale if it is older than its source, this
    # generator script, or the HTML template
    generator_mtime = os.path.getmtime(GENERATORSOURCE)
    template_mtime = os.path.getmtime(HTMLTEMPLATE)
    fundamental_mtime = max(generator_mtime, template_mtime)
    anything_modified = False

    exclude_list = generate_blog.exclude_list  # get value of static variable
    if exclude_list is None:
        try:
            # EXCLUDELIST holds one SOURCEDIR-relative path per line;
            # blank lines and lines starting with '#' are ignored
            with open(EXCLUDELIST) as fp:
                exclude_list = [os.path.abspath(os.path.join(SOURCEDIR, line.rstrip()))
                                for line in list(fp)
                                if line.strip() != "" and not line.startswith('#')]
        except OSError:
            exclude_list = []
        generate_blog.exclude_list = exclude_list  # assign to static variable for the future

    for root, dirs, files in os.walk(SOURCEDIR):
        # If root is in exclude list, skip all files and remove all subdirs from traversal list.
        if root in exclude_list:
            dirs[:] = []
            continue

        relroot = os.path.relpath(root, start=SOURCEDIR)
        dstroot = os.path.join(BUILDDIR, relroot)
        if not os.path.isdir(dstroot):
            if os.path.exists(dstroot):
                os.remove(dstroot)
            os.mkdir(dstroot, mode=0o755)

        for name in files:
            # skip hidden files and explicitly excluded files
            if name.startswith('.') or os.path.join(root, name) in exclude_list:
                continue

            extension = name.split(".")[-1]
            if extension not in ["css", "html", "jpg", "md", "png", "svg", "ico", "txt",
                                 "eot", "ttf", "woff", "woff2"]:
                continue

            relpath = os.path.join(relroot, name)
            srcpath = os.path.join(root, name)
            if extension == "md":
                # Markdown sources compile to same-named .html targets
                dstpath = os.path.join(dstroot, re.sub(r'\.md$', '.html', name))
            else:
                dstpath = os.path.join(dstroot, name)
            if ((not os.path.exists(dstpath) or
                 os.path.getmtime(dstpath) <=
                 max(fundamental_mtime, os.path.getmtime(srcpath)))):
                # new post or modified post
                anything_modified = True
                if srcpath == INDEXMD:
                    continue # index will be processed separately
                if extension in ["css", "html", "jpg", "png", "svg", "ico", "txt",
                                 "eot", "ttf", "woff", "woff2"]:
                    # static assets are copied verbatim
                    sys.stderr.write("copying %s\n" % relpath)
                    shutil.copy(srcpath, dstpath)
                elif extension == "md":
                    # Markdown sources go through pandoc
                    sys.stderr.write("compiling %s\n" % relpath)
                    pandoc_args = [
                        "pandoc", srcpath,
                        "--template", HTMLTEMPLATE,
                        "--highlight-style=pygments",
                        "-o", dstpath,
                    ]
                    try:
                        subprocess.check_call(pandoc_args)
                    except subprocess.CalledProcessError:
                        failed_builds += 1
                        sys.stderr.write("error: failed to generate %s" %
                                         relpath)
                    # postprocess generated HTML file
                    postprocess_html_file(dstpath)

    if anything_modified:
        generate_index_and_feed()
        sys.stderr.write("done\n")

    if report_total_errors:
        sys.stderr.write("build finished with %d errors\n" % failed_builds)
    return failed_builds


def generate(args):
    """Wrapper for generate_blog(fresh=False).

    Exits the process with the number of failed builds as status.
    """
    # pylint: disable=unused-argument
    # sys.exit instead of the site-injected exit() builtin, which is
    # not guaranteed to exist (e.g. under python -S)
    sys.exit(generate_blog(fresh=False))


def regenerate(args):
    """Wrapper for generate_blog(fresh=True).

    Exits the process with the number of failed builds as status.
    """
    # pylint: disable=unused-argument
    # sys.exit instead of the site-injected exit() builtin, which is
    # not guaranteed to exist (e.g. under python -S)
    sys.exit(generate_blog(fresh=True))


def sanitize(string):
    """Turn a post title into a URI-safe slug.

    Bytes input is decoded as UTF-8 first.  The title is lowercased,
    characters that are not word characters, whitespace, or hyphens are
    dropped, runs of whitespace collapse into single hyphens, and the
    result is percent-encoded.
    """
    text = string.decode('utf-8') if isinstance(string, bytes) else string
    # lowercase -> strip disallowed chars -> whitespace runs to hyphens
    slug = re.sub(r"\s+", "-", re.sub(r"[^\w\s-]", "", text.lower()))
    return urllib.parse.quote(slug)


def edit_post_with_editor(path):
    """Launch text editor to edit post at a given path.

    Text editor is $VISUAL, then if empty, $EDITOR, then if still empty,
    vi.  Environment variables that are set but empty are treated as
    unset (the previous implementation would try to execute an empty
    command in that case).

    """
    editor = os.environ.get("VISUAL") or os.environ.get("EDITOR") or "vi"
    subprocess.call([editor, path])


def new_post(title):
    """Create a new post with metadata pre-filled.

    The filename is built from the current date and a sanitized version
    of the title.  The path to the new post is printed to stdout, and
    the post is opened in the user's editor.

    Returns
    -------
    int
        0 on success; 1 if a post with the same filename already
        exists.

    """
    date = current_datetime()
    filename_date = date.strftime("%Y-%m-%d")
    iso_date = date.isoformat()
    display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
    title_sanitized = sanitize(title)
    filename = "%s-%s.md" % (filename_date, title_sanitized)
    fullpath = os.path.join(POSTSDIR, filename)
    if not os.path.isdir(POSTSDIR):
        if os.path.exists(POSTSDIR):
            # a non-directory is squatting on the posts path; remove it
            os.remove(POSTSDIR)
        # makedirs rather than mkdir so a missing parent directory does
        # not make post creation fail
        os.makedirs(POSTSDIR, mode=0o755)
    if os.path.exists(fullpath):
        sys.stderr.write("%serror: '%s' already exists, please pick a different title%s\n" %
                         (RED, fullpath, RESET))
        return 1
    # escape backslashes and double quotes so the quoted YAML title
    # field stays well-formed for titles containing either character
    title_escaped = title.replace('\\', '\\\\').replace('"', '\\"')
    with open(fullpath, 'w', encoding='utf-8') as newpost:
        newpost.write("---\n")
        newpost.write('title: "%s"\n' % title_escaped)
        newpost.write("date: %s\n" % iso_date)
        newpost.write("date_display: %s\n" % display_date)
        newpost.write("---\n\n")
    sys.stderr.write("New post created in:\n")
    print(fullpath)
    edit_post_with_editor(fullpath)

    return 0


def new_post_cli(args):
    """CLI wrapper around new_post.

    Propagates new_post's return value (previously discarded) so the
    process exit code reflects a failure to create the post.
    """
    return new_post(args.title)


def touch(filename):
    """Update the timestamp of a post to the current time.

    Rewrites the ``date:`` and ``date_display:`` fields in the post's
    metadata section in place, prints a diff-style changeset to stderr,
    and renames the file if its leading date prefix no longer matches
    the new date.

    Parameters
    ----------
    filename : str
        Path or basename of the post source file; only the basename is
        used, and it must begin with a date of the form xxxx-xx-xx.

    Returns
    -------
    int
        0 on success; 1 if the post does not exist or its filename
        lacks the leading date.

    """
    filename = os.path.basename(filename)
    fullpath = os.path.join(POSTSDIR, filename)
    if not os.path.exists(fullpath):
        sys.stderr.write("%serror: post %s not found %s\n" %
                         (RED, fullpath, RESET))
        return 1
    filename_prefix_re = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}")
    if not filename_prefix_re.match(filename):
        sys.stderr.write(RED)
        sys.stderr.write("error: post %s is not a valid post\n" % filename)
        sys.stderr.write("error: the filename of a valid post begins with "
                         "a date in the form xxxx-xx-xx\n")
        sys.stderr.write(RESET)
        return 1

    # update timestamp in the metadata section of the post
    whatchanged = io.StringIO()  # accumulates the changeset shown to the user
    date = current_datetime()
    iso_date = date.isoformat()
    display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
    filename_date = date.strftime("%Y-%m-%d")
    # inplace=True redirects sys.stdout into the file being processed,
    # so every sys.stdout.write below writes back into the post.
    # Note: (fullpath) is a parenthesized string, not a tuple; fileinput
    # accepts a single filename.
    with fileinput.input(files=(fullpath), inplace=True) as lines:
        meta_fences = 0  # "---" fences seen; metadata lies between the first two
        for line in lines:
            if line.startswith("---"):
                meta_fences += 1
                sys.stdout.write(line)
                continue
            if meta_fences >= 2:
                # already went past the metadata section
                sys.stdout.write(line)
                continue

            if line.startswith("date: "):
                updated_line = "date: %s\n" % iso_date
                sys.stdout.write(updated_line)
                whatchanged.write("-%s+%s\n" % (line, updated_line))
                continue

            if line.startswith("date_display: "):
                updated_line = "date_display: %s\n" % display_date
                sys.stdout.write(updated_line)
                whatchanged.write("-%s+%s\n" % (line, updated_line))
                continue

            sys.stdout.write(line)

    sys.stderr.write("\n%schangeset:%s\n\n%s" %
                     (YELLOW, RESET, whatchanged.getvalue()))
    whatchanged.close()

    # check if the file needs to be renamed
    new_filename = filename_prefix_re.sub(filename_date, filename)
    if new_filename != filename:
        new_fullpath = os.path.join(POSTSDIR, new_filename)
        os.rename(fullpath, new_fullpath)
        sys.stderr.write("%srenamed to %s%s\n" % (YELLOW, new_filename, RESET))
    return 0


def touch_cli(args):
    """CLI wrapper around touch.

    Propagates touch's return value (previously discarded) so the
    process exit code reflects a failure to touch the post.
    """
    return touch(args.filename)


def deploy(args):
    """Deploys build directory to origin/master without regenerating.

    If the project root is dirty, prompts for confirmation first (with
    the option of dropping into an interactive shell to commit).  Then
    commits everything in BUILDDIR with a GPG-signed commit whose
    message records the feed's updated time and the latest commit on
    the source branch, and pushes to origin/master.

    Returns
    -------
    0
        On success. Exit early with nonzero status otherwise.

    """

    # pylint: disable=unused-argument,too-many-statements

    # check whether root is dirty
    os.chdir(ROOTDIR)
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(YELLOW)
        sys.stderr.write("Project root is dirty.\n")
        sys.stderr.write("You may want to commit in your changes "
                         "to the source branch, since the SHA and title "
                         "of the latest commit on the source branch will be "
                         "incorporated into the commit message on "
                         "the deployment branch. Type s[hell] on the "
                         "next prompt to open an interactive shell.\n")
        sys.stderr.write(RESET)
        while True:
            sys.stderr.write("Continue? [yNs] ")
            answer = input()
            if not answer:
                # default is abort
                abort = True
                break
            elif answer.startswith(('y', 'Y')):
                abort = False
                break
            elif answer.startswith(('n', 'N')):
                abort = True
                break
            elif answer.startswith(('s', 'S')):
                shell = (os.environ['SHELL'] if 'SHELL' in os.environ and os.environ['SHELL']
                         else 'zsh')
                subprocess.call(shell)
                stilldirty = subprocess.check_output(["git", "status", "--porcelain"])
                if stilldirty:
                    sys.stderr.write(YELLOW)
                    sys.stderr.write("Project root is still dirty.\n")
                    sys.stderr.write(RESET)
            else:
                sys.stderr.write("Please answer yes or no.\n")
        if abort:
            sys.stderr.write("%saborting deployment%s\n" % (RED, RESET))
            return 1

    # extract latest commit on the source branch
    source_commit = subprocess.check_output(
        ["git", "log", "-1", "--pretty=oneline", "source", "--"]).decode('utf-8').strip()

    # cd into BUILDDIR and assemble commit message
    sys.stderr.write("%scommand: cd '%s'%s\n" % (BLUE, BUILDDIR, RESET))
    os.chdir(BUILDDIR)

    # extract updated time from atom.xml
    if not os.path.exists("atom.xml"):
        sys.stderr.write("atom.xml not found, cannot deploy\naborting\n")
        return 1
    atomxml = ET.parse("atom.xml").getroot()
    updated = atomxml.find('{http://www.w3.org/2005/Atom}updated').text

    commit_message = ("Site updated at %s\n\nsource branch was at:\n%s\n" %
                      (updated, source_commit))

    # commit changes in BUILDDIR
    sys.stderr.write("%scommand: git add --all%s\n" % (BLUE, RESET))
    subprocess.check_call(["git", "add", "--all"])
    # the echoed command must match the command actually run below
    # (it previously advertised --no-verify, which was never passed)
    sys.stderr.write("%scommand: git commit --gpg-sign --message='%s'%s\n" %
                     (BLUE, commit_message, RESET))
    try:
        subprocess.check_call(["git", "commit", "--gpg-sign",
                               "--message=%s" % commit_message])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git commit failed%s\n" % (RED, RESET))
        return 1

    # check dirty status
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(RED)
        sys.stderr.write("error: failed to commit all changes; "
                         "build directory still dirty\n")
        sys.stderr.write("error: please manually inspect what was left out\n")
        sys.stderr.write(RESET)
        return 1

    # push to origin/master
    sys.stderr.write("%scommand: git push origin master%s\n" % (BLUE, RESET))
    try:
        subprocess.check_call(["git", "push", "origin", "master"])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git push failed%s\n" % (RED, RESET))
        return 1
    return 0


def gen_deploy(args):
    """Regenerate and deploy.

    Before regenerating, tries to smartly determine the latest post and
    prompts to touch its timestamp if it looks like a new post.

    Returns
    -------
    int
        deploy's exit status, or 1 if the user quits at the touch
        prompt.

    """
    # pylint: disable=unused-argument,too-many-branches

    # try to smartly determine the latest post, and prompt to touch it
    current_time = time.time()
    latest_post = None
    latest_postdate = 0
    latest_mtime = 0
    # accept both positive and negative UTC offsets; the previous
    # pattern only matched "-hh:mm", so posts in an east-of-UTC zone
    # never got a post date
    dateregex = r"^date: (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}[+-]\d{2}:?\d{2})"
    for name in os.listdir(POSTSDIR):
        matchobj = re.match(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})-.*\.md", name)
        if not matchobj:
            continue
        fullpath = os.path.join(POSTSDIR, name)
        mtime = os.path.getmtime(fullpath)
        # get post date from the date metadata field of the post
        postdate = 0
        with open(fullpath, encoding="utf-8") as postobj:
            for line in postobj:
                datematch = re.match(dateregex, line.rstrip())
                if datematch:
                    postdate = dateutil.parser.parse(datematch.group(1)).timestamp()
                    break
        # skip the post if it is dated more than three days ago
        if current_time - postdate > 3 * 24 * 3600:
            continue
        if mtime > latest_mtime:
            latest_post = name
            latest_postdate = postdate
            latest_mtime = mtime
    # prompt for touching if the latest post determined above was
    # modified within the last hour but the date registered in the post
    # isn't within the last ten minutes
    if ((latest_post is not None and current_time - latest_mtime < 3600 and
         current_time - latest_postdate > 600)):
        sys.stderr.write("%sIt appears that %s might be a new post.\n"
                         "Do you want to touch its timestamp?%s\n" %
                         (GREEN, latest_post, RESET))
        while True:
            yesnoquit = input("[ynq]: ")
            if yesnoquit.startswith(("Y", "y")):
                yesno = True
                break
            elif yesnoquit.startswith(("N", "n")):
                yesno = False
                break
            elif yesnoquit.startswith(("Q", "q")):
                sys.stderr.write("%saborting gen_deploy%s\n" % (RED, RESET))
                return 1
            else:
                sys.stderr.write("Please answer yes, no, or quit.\n")
        if yesno:
            sys.stderr.write("%stouching %s%s\n" % (BLUE, latest_post, RESET))
            touch(latest_post)
            sys.stderr.write("\n")

    generate_blog(fresh=True)
    # propagate deploy's status (previously discarded, so a failed
    # deploy still exited 0)
    return deploy(None)


class HTTPServerProcess(multiprocessing.Process):
    """A subprocess that serves a directory tree over HTTP."""

    def __init__(self, rootdir):
        """Initialize the HTTPServerProcess class.

        Parameters
        ----------
        rootdir : str
            The root directory to serve from.

        """

        super().__init__()
        self.rootdir = rootdir

    def run(self):
        """Create an HTTP server and serve forever.

        Runs on localhost. The default port is 8000; if it is not
        available, a random port is used instead.
        """

        os.chdir(self.rootdir)
        # pylint: disable=invalid-name
        handler_cls = http.server.SimpleHTTPRequestHandler
        try:
            server = http.server.HTTPServer(("", 8000), handler_cls)
        except OSError:
            # port 8000 unavailable; let the OS pick a free port
            server = http.server.HTTPServer(("", 0), handler_cls)
        port = server.socket.getsockname()[1]
        sys.stderr.write("server serving on http://localhost:%d\n" % port)
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            server.shutdown()


def preview(args):
    """Serve the blog and auto regenerate upon changes.

    Starts an HTTP server subprocess rooted at BUILDDIR, then polls for
    source changes twice a second, regenerating incrementally, until
    SIGINT is received.

    Returns
    -------
    0
        After SIGINT is received and the server process has exited.

    """

    # pylint: disable=unused-argument

    server_process = HTTPServerProcess(BUILDDIR)
    server_process.start()
    sys.stderr.write("watching for changes\n")
    sys.stderr.write("send SIGINT to stop\n")

    # install a SIGINT handler only for this process
    sigint_raised = False

    def sigint_mitigator(signum, frame):
        """Translate SIGINT to setting the sigint_raised flag."""
        nonlocal sigint_raised
        sigint_raised = True

    signal.signal(signal.SIGINT, sigint_mitigator)

    # Watch and auto-regen.
    # No need to actually implement watch separately, since
    # generate_blog(fresh=False, report_total_errors=False) already
    # watches for modifications and only regens upon changes, and it is
    # completely silent when there's no change.
    while not sigint_raised:
        generate_blog(fresh=False, report_total_errors=False)
        time.sleep(0.5)

    sys.stderr.write("\nSIGINT received, cleaning up...\n")
    # NOTE(review): presumably the child also receives the terminal
    # SIGINT (its run() handles KeyboardInterrupt); we just wait for it
    server_process.join()
    return 0


def list_posts():
    """List all posts, with date, title, and path to source file.

    This function only lists posts that have been built (since it reads
    metadata from HTML rather than Markdown).

    Returns
    -------
    posts : list
        A list of posts, in reverse chronological order, where each
        element is a tuple of (date, title, path to source file).

    """
    posts = []
    for name in os.listdir(os.path.join(BUILDDIR, "blog")):
        if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
            continue

        htmlpath = os.path.join(BUILDDIR, "blog", name)
        # (removed dead locals: AtomEntry()/RssItem() instances were
        # created here but never used)
        try:
            with open(htmlpath, encoding="utf-8") as htmlfile:
                soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
                title = soup.title.text
                date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"])
                # escape the dot so only a literal ".html" suffix is replaced
                source_path = os.path.join(POSTSDIR, re.sub(r'\.html$', '.md', name))
                posts.append((date, title, source_path))
        except Exception:
            # dump the offending HTML to aid debugging, then re-raise
            sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name)
            with open(htmlpath, encoding="utf-8") as htmlfile:
                sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
            raise

    posts.sort(key=lambda post: post[0], reverse=True)
    return posts


class PostSelector:
    """Fullscreen terminal UI for picking a post from a paginated list.

    Parameters
    ----------
    term : blessed.Terminal
        The terminal to draw on; fullscreen mode is entered immediately.
    posts : list
        (date, title, path) tuples, as returned by list_posts.

    """

    def __init__(self, term, posts):
        self._term = term
        # reserve two rows: header (row 0) and footer (last row)
        self.posts_per_page = term.height - 2
        self.pages = [posts[i:i+self.posts_per_page]
                      for i in range(0, len(posts), self.posts_per_page)]
        self.num_pages = len(self.pages)
        self.pagepos = 0  # index of the currently displayed page
        self.postpos = 0  # index of the highlighted post within the page
        self.inserting = False  # True if in the middle of inserting a post #, False otherwise
        term.enter_fullscreen()
        print(term.clear(), end="")
        sys.stdout.flush()
        self.selection = ""  # path of the chosen post; "" until selected
        self.quit = False  # set when the user quits without selecting

        self.display_page()

    def _clear_to_eol(self):
        """Clear from the cursor to the end of the current line."""
        term = self._term
        print(term.clear_eol, end="")
        sys.stdout.flush()

    def _print_line(self, line, linenum, highlight=False):
        """Print line (truncated to terminal width) at row linenum."""
        term = self._term
        width = term.width
        with term.location(0, linenum):
            if highlight:
                print(term.reverse(line[:width]), end="")
            else:
                print(line[:width], end="")
            self._clear_to_eol()

    def _print_post(self, page, pos, highlight=False):
        """Render the post at index pos of page on screen row pos + 1."""
        if pos >= len(page):
            # if position out of range, just clear the line
            self._print_line("", pos + 1, highlight)
        else:
            date, title, path = page[pos]
            line = "%3d: %s %s" % (pos, date.strftime("%m/%d/%y"), title)
            self._print_line(line, pos + 1, highlight)

    def display_page(self):
        """Redraw the header, the current page of posts, and the footer."""
        term = self._term
        page = self.pages[self.pagepos]

        with term.hidden_cursor():
            topline = "  PAGE %d/%d  POST %d" % (self.pagepos + 1, self.num_pages, self.postpos)
            if self.inserting:
                # blinking underscore hints more digits may be typed
                topline += term.blink("_")
            self._print_line(topline, 0, highlight=True)

            for i in range(self.posts_per_page):
                self._print_post(page, i)
            # highlight selected post
            self._print_post(page, self.postpos, highlight=True)

            bottomline = "  Press h for help."
            self._print_line(bottomline, term.height - 1, highlight=True)

    def dispatch(self, key):
        """Handle a single keystroke and update selector state.

        Digits accumulate into a post number, delete edits it, arrows
        and letters navigate, enter selects, q quits, h shows help.
        """
        term = self._term
        if key in string.digits:
            # insert
            if self.inserting:
                # append the typed digit to the number being entered
                newpostpos = 10 * self.postpos + int(key)
                if newpostpos < len(self.pages[self.pagepos]):
                    self.postpos = newpostpos
            else:
                self.postpos = int(key)
            self.inserting = True
        elif key.name == "KEY_DELETE":
            # drop the last typed digit
            self.postpos //= 10
            self.inserting = True
        else:
            self.inserting = False
            if key.name == "KEY_ENTER":
                # record the path (third element) of the highlighted post
                self.selection = self.pages[self.pagepos][self.postpos][2]
            if key in {"q", "Q"}:
                self.quit = True
            elif key.name == "KEY_DOWN" or key in {"n", "N"}:
                if self.postpos + 1 < len(self.pages[self.pagepos]):
                    self.postpos += 1
            elif key.name == "KEY_UP" or key in {"p", "P"}:
                if self.postpos > 0:
                    self.postpos -= 1
            elif key.name == "KEY_RIGHT" or key in {".", ">"}:
                if self.pagepos + 1 < self.num_pages:
                    self.pagepos += 1
                    self.postpos = 0
            elif key.name == "KEY_LEFT" or key in {",", "<"}:
                if self.pagepos > 0:
                    self.pagepos -= 1
                    self.postpos = 0
            elif key in {"h", "H"}:
                # show the help screen until any key is pressed
                print(term.clear_eol, end="")
                sys.stdout.flush()
                help_text_lines = [
                    "Next post:         n or <down>",
                    "Previous post:     p or <up>",
                    "Next page:         . or > or <right>",
                    "Previous page:     , or < or <left>",
                    "Select post:       <enter> or <return>",
                    "Select by number:  type number as shown (delete or backspace to edit)",
                    "Get help:          h",
                    "Quit program:      q",
                ]
                for i in range(term.height - 1):
                    self._print_line(help_text_lines[i] if i < len(help_text_lines) else "", i)
                bottomline = "  Press any key to continue."
                self._print_line(bottomline, term.height - 1, highlight=True)

                with term.raw():
                    term.inkey()

    def restore(self):
        """Leave fullscreen mode and clear the screen."""
        term = self._term
        term.exit_fullscreen()
        print(term.clear(), end="")
        sys.stdout.flush()

    def select(self):
        """Run the interactive loop; return the chosen path, or "" on quit."""
        term = self._term
        try:
            while True:
                with term.raw():
                    self.dispatch(term.inkey())
                if self.selection or self.quit:
                    break
                self.display_page()
        except Exception:
            raise
        finally:
            # always restore the terminal, even on error
            self.restore()

        return self.selection


def edit_existing_post(args):
    """Interactively pick a built post and open its source in the editor.

    Returns
    -------
    int
        0 if a post was selected and opened; 1 if the selector was quit
        without a selection.

    """
    # pylint: disable=unused-argument
    selector = PostSelector(blessed.Terminal(), list_posts())
    selection = selector.select()
    if not selection:
        return 1
    print(selection)
    edit_post_with_editor(selection)
    # explicit 0 (was an implicit None) for consistency with the other
    # subcommand handlers
    return 0


def main():
    """CLI interface.

    Builds the argparse command tree (one subcommand per action), runs
    the selected handler inside the colorama context, and exits with
    the handler's return value.
    """
    description = "Simple blog generator in Python with Pandoc as backend."
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers(dest="action")
    subparsers.required = True

    # one distinctly named parser variable per subcommand (the original
    # reused parser_new_post for all of them, which was misleading)
    parser_new_post = subparsers.add_parser(
        "new_post", aliases=["n", "new"],
        description="Create a new post with metadata pre-filled.")
    parser_new_post.add_argument("title", help="title of the new post")
    parser_new_post.set_defaults(func=new_post_cli)

    parser_touch = subparsers.add_parser(
        "touch", aliases=["t", "tou"],
        description="""Touch an existing post, i.e., update its
        timestamp to current time.  Why is this ever useful? Well, the
        timestamp filled in by new_post is the time of creation, but one
        might spend several hours after the creation of the file to
        finish the post. Sometimes the post is even created on one day
        and finished on another (say created at 11pm and finished at
        1am). Therefore, one may want to retouch the timestamp before
        publishing.""")
    parser_touch.add_argument("filename",
                              help="path or basename of the source file, "
                              "e.g., 2015-05-05-new-blog-new-start.md")
    parser_touch.set_defaults(func=touch_cli)

    parser_generate = subparsers.add_parser(
        "generate", aliases=["g", "gen"],
        description="Generate new or changed objects.")
    parser_generate.set_defaults(func=generate)

    parser_regenerate = subparsers.add_parser(
        "regenerate", aliases=["r", "regen"],
        description="Regenerate the entire blog afresh.")
    parser_regenerate.set_defaults(func=regenerate)

    parser_preview = subparsers.add_parser(
        "preview", aliases=["p", "pre"],
        description="Serve the blog locally and auto regenerate upon changes.")
    parser_preview.set_defaults(func=preview)

    parser_deploy = subparsers.add_parser(
        "deploy", aliases=["d", "dep"],
        description="Deploy build/ to origin/master without regenerating.")
    parser_deploy.set_defaults(func=deploy)

    parser_gen_deploy = subparsers.add_parser(
        "gen_deploy", aliases=["gd", "gendep"],
        description="Rebuild entire blog and deploy build/ to origin/master.")
    parser_gen_deploy.set_defaults(func=gen_deploy)

    parser_edit = subparsers.add_parser(
        "edit", aliases=["e", "ed"],
        description="Bring up post selector to select post for editing.")
    parser_edit.set_defaults(func=edit_existing_post)

    with init_colorama():
        args = parser.parse_args()
        returncode = args.func(args)
    # sys.exit instead of the site-injected exit() builtin
    sys.exit(returncode)


if __name__ == '__main__':
    # run the CLI only when executed as a script, not when imported
    main()