#!/usr/bin/env python3

"""A simple blog generator with Pandoc as backend."""

import argparse
from contextlib import contextmanager
import datetime
import io
import http.client
import http.server
import multiprocessing
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import time
import urllib.parse
import xml.etree.ElementTree as ET

import bs4
import colorama
import dateutil.parser
import dateutil.tz


ROOTDIR = os.path.dirname(os.path.realpath(__file__))
SOURCEDIR = os.path.join(ROOTDIR, "source")
INDEXMD = os.path.join(SOURCEDIR, "index.md")
TEMPLATEDIR = os.path.join(ROOTDIR, "templates")
HTMLTEMPLATE = os.path.join(TEMPLATEDIR, "template.html")
BUILDDIR = os.path.join(ROOTDIR, "build")
ATOM = os.path.join(BUILDDIR, "atom.xml")
INDEXHTML = os.path.join(BUILDDIR, "index.html")

FEED_MAX_ENTRIES = 20
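
# Expected layout relative to this script (inferred from the constants above):
#   source/                   Markdown sources and static assets (posts in source/blog/*.md)
#   source/index.md           home page content, prepended to the generated TOC
#   templates/template.html   Pandoc HTML template used for every page
#   build/                    generated site (index.html, atom.xml, blog/*.html, copied assets)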


# Hack ET to support CDATA.
# Note: this reaches into ET internals (ET._serialize, ET._escape_cdata), so
# it may break on a future Python version.
# XML suuuuuucks.
# http://stackoverflow.com/a/30019607/1944784

def cdata(text=None):
    """Generate an XML CDATA element (ET.Element)."""
    element = ET.Element('![CDATA[')
    element.text = text
    return element

# pylint: disable=protected-access

ET._original_serialize_xml = ET._serialize_xml

def _serialize_xml(write, elem, qnames, namespaces, short_empty_elements,
                   **kwargs):
    """Hacked _serialize_xml, tested to work in Python 3.4.3."""
    if elem.tag == '![CDATA[':
        write("\n<{}{}]]>\n".format(elem.tag, elem.text or ""))
        if elem.tail:
            write(ET._escape_cdata(elem.tail))
    else:
        return ET._original_serialize_xml(write, elem, qnames, namespaces,
                                          short_empty_elements, **kwargs)

ET._serialize_xml = ET._serialize['xml'] = _serialize_xml
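
# With the hook installed, ET.tostring() on an element containing a cdata()
# child produces, for example:
#
#   <content type="html">
#   <![CDATA[<p>raw HTML markup survives unescaped</p>]]>
#   </content>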


# declare the global foreground ANSI codes
BLACK = ""
BLUE = ""
CYAN = ""
GREEN = ""
MAGENTA = ""
RED = ""
WHITE = ""
YELLOW = ""
RESET = ""

@contextmanager
def init_colorama():
    """Set global foreground modifying ANSI codes.

    BLACK, BLUE, CYAN, GREEN, MAGENTA, RED, WHITE, YELLOW, and RESET.

    """

    colorama.init()
    # export every colorama.Fore code as a module-level global of the same
    # name, covering the names declared above
    for color, ansi in colorama.Fore.__dict__.items():
        globals()[color] = ansi
    try:
        yield
    finally:
        colorama.deinit()


class AtomFeed(object):
    """Class for storing atom:feed date and metadata."""

    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None  # atom:author
        self.generator = None  # atom:generator, optional
        self.icon = None  # atom:icon, optional
        self.logo = None  # atom:logo, optional
        self.id_text = None  # atom:id, just use URI
        self.id = None  # atom:id
        self.links = []  # list of atom:link
        self.title_text = None  # the text of atom:title
        self.title = None  # atom:title
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None  # atom:updated
        self.entries = []  # list of atom:entry, in reverse time order
        self.feed = None  # atom:feed, assembled

    def assemble_feed(self):
        """Assemble atom:feed."""
        self.feed = ET.Element("feed", xmlns="http://www.w3.org/2005/Atom")
        self.feed.append(self.title)
        for link in self.links:
            self.feed.append(link)
        self.feed.append(self.updated)
        self.feed.append(self.id)
        self.feed.append(self.author)
        if self.icon is not None:
            self.feed.append(self.icon)
        if self.logo is not None:
            self.feed.append(self.logo)
        if self.generator is not None:
            self.feed.append(self.generator)
        # include at most FEED_MAX_ENTRIES entries in the feed
        for entry in self.entries[:FEED_MAX_ENTRIES]:
            self.feed.append(entry.entry)
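
    # For reference, the assembled feed has roughly this shape (children in
    # assembly order):
    #
    #   <feed xmlns="http://www.w3.org/2005/Atom">
    #     <title>...</title>
    #     <link ... rel="self"/> <link .../>
    #     <updated>...</updated>
    #     <id>...</id>
    #     <author>...</author>
    #     <icon>...</icon> <logo>...</logo> <generator>...</generator>  (optional)
    #     <entry>...</entry> ...  (at most FEED_MAX_ENTRIES, newest first)
    #   </feed>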

    def dump_feed(self):
        """Dump atom:feed XML."""
        if self.feed is None:
            self.assemble_feed()
        return ET.tostring(self.feed).decode('utf-8')


class AtomEntry(object):
    """Class for storing atom:entry data and metadata."""

    # pylint: disable=invalid-name,too-many-instance-attributes

    def __init__(self):
        """Define available attributes."""
        self.author = None  # atom:author
        self.id_text = None  # atom:id, just use URI
        self.id = None  # atom:id
        self.relpath = None  # HTML page path relative to home
        self.link = None  # atom:link
        self.title_text = None  # plain text title
        self.title = None  # atom:title
        self.updated_datetime = None  # update time as a datetime object
        self.updated = None  # atom:updated
        self.content_html = None  # content as HTML markup
        self.content = None  # atom:content
        self.entry = None  # atom:entry, assembled

    def assemble_entry(self):
        """Assemble atom:entry."""
        self.entry = ET.Element("entry")
        self.entry.append(self.title)
        self.entry.append(self.link)
        self.entry.append(self.updated)
        self.entry.append(self.id)
        self.entry.append(self.author)
        self.entry.append(self.content)

    def dump_entry(self):
        """Dump atom:entry XML."""
        if self.entry is None:
            self.assemble_entry()
        return ET.tostring(self.entry).decode('utf-8')


def generate_index(feed):
    """Generate index.html from index.md and a TOC."""

    sys.stderr.write("generating index.html\n")

    # generate TOC
    tocbuff = io.StringIO()
    tocbuff.write('<div class="indextoc" id="toc">')
    year = 10000  # will be larger than the latest year for quite a while
    # recall that entries are in reverse chronological order
    for entry in feed.entries:
        date = entry.updated_datetime
        if date.year < year:
            # write a new <h2 class="toc"> tag with the smaller year
            year = date.year
            tocbuff.write(u'\n<h2 class="toc" id="{0}" datetime="{0}">{0}</h2>\n\n'.format(year))

        # write a new <li> entry (<ul>) in Markdown, in the format:
        # * <time class="tocdate" datetime="2015-05-05T00:06:04-0700">May 5</time>
        #   [Blah blah](/blog/2015-05-04-blah-blah.html)
        monthday = date.strftime("%b %d")
        tocbuff.write(u'* <time class="tocdate" datetime="%s">%s</time> [%s](%s)\n' %
                      (date.isoformat(), monthday, entry.title_text, entry.relpath))
    tocbuff.write('</div>')

    # create tempfile with index.md and the TOC concatenated, and generate index.html from that
    # pylint: disable=invalid-name
    fd, tmppath = tempfile.mkstemp()
    os.close(fd)
    with open(tmppath, 'w', encoding='utf-8') as tmpfile:
        if os.path.exists(INDEXMD):
            with open(INDEXMD, 'r', encoding='utf-8') as indexmd:
                tmpfile.write(u"%s\n\n<hr>\n\n" % indexmd.read())
        tmpfile.write("%s\n" % tocbuff.getvalue())
        tocbuff.close()

    pandoc_args = [
        "pandoc", tmppath,
        "--template", HTMLTEMPLATE,
        "--highlight-style=pygments",
        "-o", INDEXHTML,
    ]
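    # The command line is roughly (the temp file name varies):
    #   pandoc /tmp/tmpXXXXXX --template templates/template.html \
    #     --highlight-style=pygments -o build/index.html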
    try:
        subprocess.check_call(pandoc_args)
    except subprocess.CalledProcessError:
        sys.stderr.write("error: failed to generate index.html\n")
    os.remove(tmppath)


def generate_index_and_feed():
    """Generate index.html and atom feed."""
    sys.stderr.write("generating atom feed\n")
    # initialize feed
    feed = AtomFeed()
    # TODO: Put hard-coded values in a config file
    feed.author = ET.fromstring("<author>"
                                "<name>Zhiming Wang</name>"
                                "<uri>https://github.com/zmwangx</uri>"
                                "<email>zmwangx@gmail.com</email>"
                                "</author>")
    feed.generator = ET.Element("generator", uri="https://github.com/zmwangx/zmwangx.github.io")
    feed.generator.text = "pyblog"
    feed.icon = ET.Element("icon")
    feed.icon.text = "http://zmwangx.github.io/img/icon-400.png"
    feed.id_text = "http://zmwangx.github.io"
    feed.id = ET.Element("id")
    feed.id.text = feed.id_text
    feed.links = [
        ET.Element("link", href="http://zmwangx.github.io/atom.xml", rel="self"),
        ET.Element("link", href="http://zmwangx.github.io/"),
    ]
    feed.title_text = "dl? cmplnts?"
    feed.title = ET.fromstring("<title>%s</title>" % feed.title_text)
    # update time will be set after everything finishes

    postspath = os.path.join(BUILDDIR, "blog")
    # traverse all posts in reverse time order
    for name in sorted(os.listdir(postspath), reverse=True):
        if re.match(r"^(\d{4})-(\d{2})-(\d{2}).*\.html", name):
            htmlpath = os.path.join(postspath, name)
            entry = AtomEntry()
            with open(htmlpath, encoding="utf-8") as htmlfile:
                soup = bs4.BeautifulSoup(htmlfile.read(), "html.parser")
                entry.author = feed.author  # assume it's always the same author
                entry.id_text = "%s/blog/%s" % (feed.id_text, name)
                entry.id = ET.Element("id")
                entry.id.text = entry.id_text
                entry.relpath = "/blog/%s" % name
                entry.link = ET.Element("link", href=entry.id_text)
                entry.title_text = soup.title.text
                entry.title = ET.Element("title", type="html")
                entry.title.text = entry.title_text
                post_date = soup.find("meta", attrs={"name": "date"})["content"]
                entry.updated_datetime = dateutil.parser.parse(post_date)
                entry.updated = ET.Element("updated")
                # pylint: disable=no-member
                entry.updated.text = entry.updated_datetime.isoformat()
                # extract the article content without header and footer
                article = soup.article
                article.header.extract()
                article.footer.extract()
                entry.content_html = ''.join([str(content)
                                              for content in article.contents])
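                # wrap the raw HTML in CDATA so it survives serialization
                # unescaped (see the ET hack near the top of this file)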
                entry.content = ET.Element("content", type="html")
                entry.content.append(cdata(entry.content_html))
                entry.assemble_entry()
                feed.entries.append(entry)

    generate_index(feed)

    feed.updated_datetime = datetime.datetime.fromtimestamp(round(time.time()),
                                                            dateutil.tz.tzlocal())
    feed.updated = ET.Element("updated")
    feed.updated.text = feed.updated_datetime.isoformat()

    with open(ATOM, 'w', encoding='utf-8') as atom:
        atom.write("%s\n" % feed.dump_feed())
        sys.stderr.write("wrote atom.xml\n")


def generate_blog(fresh=False, report_total_errors=True):
    """Generate the blog in BUILDDIR.

    Parameters
    ----------
    fresh : bool
        If True, remove all existing build artifacts and start afresh;
        otherwise, only copy or build new or modified files. Default is
        False.
    report_total_errors : bool
        If True (the default), print a line to stderr at the end of the
        build (assuming the function doesn't raise early) reporting the
        total number of errors, e.g., "build finished with 0 errors".
        Pass False to suppress the report, which results in a completely
        silent session when nothing changed; this is useful for
        auto-regen, for instance.

    Returns
    -------
    failed_builds : int
        Number of build failures.

    """

    # pylint: disable=too-many-branches,too-many-locals,too-many-statements

    if not os.path.isdir(SOURCEDIR):
        raise OSError("source directory %s does not exist" % SOURCEDIR)
    if not os.path.exists(HTMLTEMPLATE):
        raise OSError("HTML template %s not found" % HTMLTEMPLATE)

    if not os.path.isdir(BUILDDIR):
        if os.path.exists(BUILDDIR):
            os.remove(BUILDDIR)
        os.mkdir(BUILDDIR, mode=0o755)
    if fresh:
        for name in os.listdir(BUILDDIR):
            if name == ".git":
                continue
            obj = os.path.join(BUILDDIR, name)
            if os.path.isdir(obj):
                shutil.rmtree(obj)
            else:
                os.remove(obj)

    failed_builds = 0
    template_mtime = os.path.getmtime(HTMLTEMPLATE)
    anything_modified = False

    for root, _, files in os.walk(SOURCEDIR):
        relroot = os.path.relpath(root, start=SOURCEDIR)
        dstroot = os.path.join(BUILDDIR, relroot)
        if not os.path.isdir(dstroot):
            if os.path.exists(dstroot):
                os.remove(dstroot)
            os.mkdir(dstroot, mode=0o755)

        for name in files:
            if name.startswith('.'):
                continue
            extension = name.split(".")[-1]
            if extension not in ["css", "jpg", "md", "png", "svg", "ico"]:
                continue

            relpath = os.path.join(relroot, name)
            srcpath = os.path.join(root, name)
            if extension == "md":
                dstpath = os.path.join(dstroot, re.sub(r'\.md$', '.html', name))
            else:
                dstpath = os.path.join(dstroot, name)
            if ((not os.path.exists(dstpath) or
                 os.path.getmtime(dstpath) <=
                 max(template_mtime, os.path.getmtime(srcpath)))):
                # rebuild when the destination is missing, or older than
                # either the source file or the HTML template
                anything_modified = True
                if srcpath == INDEXMD:
                    continue  # index.md is handled separately by generate_index
                if extension in ["css", "jpg", "png", "svg", "ico"]:
                    sys.stderr.write("copying %s\n" % relpath)
                    shutil.copy(srcpath, dstpath)
                elif extension == "md":
                    sys.stderr.write("generating %s\n" % relpath)
                    pandoc_args = [
                        "pandoc", srcpath,
                        "--template", HTMLTEMPLATE,
                        "--highlight-style=pygments",
                        "-o", dstpath,
                    ]
                    try:
                        subprocess.check_call(pandoc_args)
                    except subprocess.CalledProcessError:
                        failed_builds += 1
                        sys.stderr.write("error: failed to generate %s" %
                                         relpath)
    if anything_modified:
        generate_index_and_feed()

    if report_total_errors:
        sys.stderr.write("build finished with %d errors\n" % failed_builds)
    return failed_builds


def generate(args):
    """Wrapper for generate_blog(fresh=False)."""
    # pylint: disable=unused-argument
    exit(generate_blog(fresh=False))


def regenerate(args):
    """Wrapper for generate_blog(fresh=True)."""
    # pylint: disable=unused-argument
    exit(generate_blog(fresh=True))


def sanitize(string):
    """Sanitize string (title) for URI consumption."""
    if isinstance(string, bytes):
        string = string.decode('utf-8')
    # to lowercase
    string = string.lower()
    # strip all non-word, non-hyphen and non-whitespace characters
    string = re.sub(r"[^\w\s-]", "", string)
    # replace consecutive whitespaces with a single hyphen
    string = re.sub(r"\s+", "-", string)
    # percent encode the result
    return urllib.parse.quote(string)
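
# For example, sanitize("Hello, World!") returns "hello-world"; non-ASCII word
# characters are kept and percent-encoded, e.g. sanitize("你好") returns
# "%E4%BD%A0%E5%A5%BD".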


def new_post(args):
    """Create a new post with metadata pre-filled.

    The path to the new post is printed to stdout.

    Returns
    -------
    0
        On success.

    """
    title = args.title
    date = datetime.datetime.fromtimestamp(round(time.time()),
                                           dateutil.tz.tzlocal())
    filename_date = date.strftime("%Y-%m-%d")
    iso_date = date.isoformat()
    display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
    title_sanitized = sanitize(title)
    filename = "%s-%s.md" % (filename_date, title_sanitized)
    postdir = os.path.join(SOURCEDIR, "blog")
    fullpath = os.path.join(postdir, filename)
    if not os.path.isdir(postdir):
        if os.path.exists(postdir):
            os.remove(postdir)
        os.mkdir(postdir, mode=0o755)
    with open(fullpath, 'w', encoding='utf-8') as newpost:
        newpost.write("---\n")
        newpost.write('title: "%s"\n' % title)
        newpost.write("date: %s\n" % iso_date)
        newpost.write("date-display: %s\n" % display_date)
        newpost.write("---\n")
    sys.stderr.write("New post created in:\n")
    print(fullpath)
    return 0


def deploy(args):
    """Deploys build directory to origin/master without regenerating.

    Returns
    -------
    0
        On success. Exit early with nonzero status otherwise.

    """

    # pylint: disable=unused-argument,too-many-statements
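
    # Deployment model (as implied by the code below): the blog source lives
    # on the "source" branch of this repository, while build/ is presumably a
    # separate checkout whose current branch is pushed to origin/master, i.e.
    # the branch that is actually served.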

    # check whether root is dirty
    os.chdir(ROOTDIR)
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(YELLOW)
        sys.stderr.write("Project root is dirty.\n")
        sys.stderr.write("You may want to commit in your changes "
                         "to the source branch, since the SHA and title "
                         "of the latest commit on the source branch will be "
                         "incorporated into the commit message on "
                         "the deployment branch.\n")
        sys.stderr.write(RESET)
        while True:
            sys.stderr.write("Continue? [yN] ")
            answer = input()
            if not answer:
                # default
                abort = True
                break
            elif answer.startswith(('y', 'Y')):
                abort = False
                break
            elif answer.startswith(('n', 'N')):
                abort = True
                break
            else:
                sys.stderr.write("Please answer yes or no.\n")
        if abort:
            sys.stderr.write("%saborting deployment%s\n" % (RED, RESET))
            exit(1)

    # extract latest commit on the source branch
    source_commit = subprocess.check_output(
        ["git", "log", "-1", "--pretty=oneline", "source", "--"]).decode('utf-8').strip()

    # cd into BUILDDIR and assemble commit message
    sys.stderr.write("%scommand: cd '%s'%s\n" % (BLUE, BUILDDIR, RESET))
    os.chdir(BUILDDIR)

    # extract updated time from atom.xml
    if not os.path.exists("atom.xml"):
        sys.stderr.write("atom.xml not found, cannot deploy\naborting\n")
        exit(1)
    atomxml = ET.parse("atom.xml").getroot()
    updated = atomxml.find('{http://www.w3.org/2005/Atom}updated').text

    commit_message = ("site updated %s\n\nsource branch was at:\n%s\n" %
                      (updated, source_commit))

    # commit changes in BUILDDIR
    sys.stderr.write("%scommand: git add --all%s\n" % (BLUE, RESET))
    subprocess.check_call(["git", "add", "--all"])
    sys.stderr.write("%scommand: git commit --gpg-sign --message='%s'%s\n" %
                     (BLUE, commit_message, RESET))
    try:
        subprocess.check_call(["git", "commit", "--gpg-sign",
                               "--message=%s" % commit_message])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git commit failed%s\n" % (RED, RESET))
        exit(1)

    # check dirty status
    dirty = subprocess.check_output(["git", "status", "--porcelain"])
    if dirty:
        sys.stderr.write(RED)
        sys.stderr.write("error: failed to commit all changes; "
                         "build directory still dirty\n")
        sys.stderr.write("error: please manually inspect what was left out\n")
        sys.stderr.write(RESET)
        exit(1)

    # push to origin/master
    sys.stderr.write("%scommand: git push origin master%s\n" % (BLUE, RESET))
    try:
        subprocess.check_call(["git", "push", "origin", "master"])
    except subprocess.CalledProcessError:
        sys.stderr.write("\n%serror: git push failed%s\n" % (RED, RESET))
        exit(1)
    return 0


def gen_deploy(args):
    """Regenerate and deploy."""
    # pylint: disable=unused-argument
    generate_blog(fresh=True)
    return deploy(None)


class HTTPServerProcess(multiprocessing.Process):
    """This class can be used to run an HTTP server."""

    def __init__(self, rootdir):
        """Initialize the HTTPServerProcess class.

        Parameters
        ----------
        rootdir : str
            The root directory to serve from.

        """

        super().__init__()
        self.rootdir = rootdir

    def run(self):
        """Create an HTTP server and serve forever.

        Runs on localhost. The default port is 8000; if it is not
        available, a random port is used instead.
        """

        os.chdir(self.rootdir)
        # pylint: disable=invalid-name
        HandlerClass = http.server.SimpleHTTPRequestHandler
        try:
            httpd = http.server.HTTPServer(("", 8000), HandlerClass)
        except OSError:
            httpd = http.server.HTTPServer(("", 0), HandlerClass)
        _, portnumber = httpd.socket.getsockname()
        sys.stderr.write("server serving on http://localhost:%d\n" % portnumber)
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            httpd.shutdown()


def preview(args):
    """Serve the blog and auto regenerate upon changes."""

    # pylint: disable=unused-argument

    server_process = HTTPServerProcess(BUILDDIR)
    server_process.start()
    sys.stderr.write("watching for changes\n")
    sys.stderr.write("send SIGINT to stop\n")

    # install a SIGINT handler only for this process
    sigint_raised = False

    def sigint_mitigator(signum, frame):
        nonlocal sigint_raised
        sigint_raised = True

    signal.signal(signal.SIGINT, sigint_mitigator)

    # Watch and auto-regen.
    # No need to actually implement watch separately, since
    # generate_blog(fresh=False, report_total_errors=False) already
    # watches for modifications and only regens upon changes, and it is
    # completely silent when there's no change.
    while not sigint_raised:
        generate_blog(fresh=False, report_total_errors=False)
        time.sleep(0.5)

    sys.stderr.write("\nSIGINT received, cleaning up...\n")
    server_process.join()
    return 0


def main():
    """CLI interface."""
    description = "Simple blog generator in Python with Pandoc as backend."
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers()

    parser_generate = subparsers.add_parser(
        "generate", aliases=["g", "gen"],
        description="Generate new or changed objects.")
    parser_generate.set_defaults(func=generate)

    parser_regenerate = subparsers.add_parser(
        "regenerate", aliases=["r", "regen"],
        description="Regenerate the entire blog afresh.")
    parser_regenerate.set_defaults(func=regenerate)

    parser_new_post = subparsers.add_parser(
        "new_post", aliases=["n", "new"],
        description="Create a new post with metadata pre-filled.")
    parser_new_post.add_argument("title", help="title of the new post")
    parser_new_post.set_defaults(func=new_post)

    parser_deploy = subparsers.add_parser(
        "deploy", aliases=["d", "dep"],
        description="Deploy build/ to origin/master without regenerating.")
    parser_deploy.set_defaults(func=deploy)

    parser_gen_deploy = subparsers.add_parser(
        "gen_deploy", aliases=["gd", "gendep"],
        description="Rebuild entire blog and deploy build/ to origin/master.")
    parser_gen_deploy.set_defaults(func=gen_deploy)

    parser_preview = subparsers.add_parser(
        "preview", aliases=["p", "pre"],
        description="Serve the blog locally and auto regenerate upon changes.")
    parser_preview.set_defaults(func=preview)
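
    # Typical invocations, assuming this script is saved or symlinked as
    # "pyblog" (the generator name used in the atom feed):
    #   pyblog new "Hello world"   # create source/blog/YYYY-MM-DD-hello-world.md
    #   pyblog gen                 # build new or changed files into build/
    #   pyblog preview             # serve build/ locally and auto-regenerate
    #   pyblog gen_deploy          # rebuild everything and deploy build/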

    with init_colorama():
        args = parser.parse_args()
        if not hasattr(args, "func"):
            # no subcommand given
            parser.print_usage(sys.stderr)
            exit(1)
        returncode = args.func(args)
    exit(returncode)


if __name__ == '__main__':
    main()