"
# Write the menu into every page in the list, replacing the '-- generate menu here --' placeholder
for html_file in html_fileList:
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/"+html_file):
with open("build/"+html_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/"+html_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'-- generate menu here --', htmly_website_page, line))
os.remove(tmppath)
def generate_table():
"""Generate table."""
first_comp = 1
first_pr = 1
tr_class = "odd"
documents_fileList = []
documents_fileList.append("/website/bts-sio.html")
fd, tmppath = tempfile.mkstemp()
os.close(fd)
htmly_website_page = ""
if os.path.exists(BUILDDIR+"/website/bts-sio.html"):
sys.stderr.write("generating table\n")
# Put in a list the pages where the menu will be written
#for root, dirs, files in os.walk(BUILDDIR+"/website/Documents/Situation2"):
# for name in files:
# if name.endswith(".html"):
# try:
# documents_fileList.append(os.path.join(root.split('build')[1], name))
# except IndexError:
# documents_fileList.append(name)
# Generate the string that contains the links of the menu
#htmly_website_page = "<...>"
#for name in os.listdir(os.path.join(BUILDDIR, "website/Documents/Situation2")):
#    htmly_website_page += "<...>"
# Write the table markup into all pages in the list, replacing the '-- table --' placeholders
for document_file in documents_fileList:
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build"+document_file):
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
# the replacement markup below is a plausible reconstruction; the original tag attributes were lost
indexmd.write(re.sub(r'-- table --',
'<table><tr><th>Compétence</th><th>Activité</th><th>Justification</th></tr>',
line))
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
if re.match(r'^\$.*', line) and first_pr == 1:
# the row-opening markup below is an assumed reconstruction; the original string was lost
line_edited = '<tr><td>' + line.split("$")[1] + '</td>'
indexmd.write(re.sub(r'^\$.*', line_edited, line))
else:
indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
if re.match(r'^ \$.*\$$', line):
# the cell markup below is an assumed reconstruction; the original string was lost
indexmd.write(re.sub(r'^ \$.*\$$', '<td>' + line.split("$")[1] + '</td>', line))
else:
indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
if re.match(r'^ \$.*', line):
# the cell markup below is an assumed reconstruction; the original string was lost
indexmd.write(re.sub(r'^ \$.*', '<td>' + line.split("$")[1] + '</td>', line))
else:
indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r"-- end table --", "
", line))
os.remove(tmppath)
def generate_blog_list(feed):
""""Generate blog list """
sys.stderr.write("generating blog list\n")
html_fileList = []
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if re.search(r'blog',root):
if name.endswith(".html"):
try:
html_fileList.append(os.path.join(root.split('blog/')[1], name))
except IndexError:
html_fileList.append(name)
# generate TOC
for html_file in html_fileList:
div_blog_list = u'<div>\n\n'  # opening tag assumed; original attributes lost
year = 10000 # will be larger than the latest year for quite a while
# recall that entries are in reverse chronological order
table_opened = False
for entry in feed.entries:
date = entry.updated_datetime
if date.year < year:
# close the previous table if there is one
if table_opened:
div_blog_list += u'</table>\n'
# write a new <h2> tag with the smaller year
year = date.year
div_blog_list += u'\n<h2>{0}</h2>\n\n'.format(year)
div_blog_list += u'<table>\n'
table_opened = True
# write a new table row entry, in the format (markup is a reconstruction):
#   <tr><td><time datetime="...">Dec 31</time></td><td><a href="/blog/...">Title</a></td></tr>
monthday = date.strftime("%b %d")
div_blog_list += (u'<tr><td><time datetime="%s">%s</time></td>'
u'<td><a href="%s">%s</a></td></tr>\n' %
(date.isoformat(), monthday, entry.relpath, entry.title_text))
if table_opened:
div_blog_list += u'</table>\n'
div_blog_list += u'</div>'
fd, tmppath = tempfile.mkstemp()
os.close(fd)
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/blog/index.html"):
with open("build/blog/index.html", 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/blog/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate blog_list here %}', div_blog_list, line))
def generate_notes_list():
""""Generate notes list """
sys.stderr.write("generating notes list\n")
html_fileList = []
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if re.search(r'notes',root):
if name.endswith(".html"):
try:
html_fileList.append(os.path.join(root.split('notes/')[1], name))
except IndexError:
html_fileList.append(name)
div_notes_list = u'<div>\n\n'  # opening tag assumed; original attributes lost
year = 10000 # will be larger than the latest year for quite a while
# recall that entries are in reverse chronological order
table_opened = False
for name in list(reversed(sorted(os.listdir(os.path.join(BUILDDIR, "notes"))))):
if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
htmlpath = os.path.join(BUILDDIR, "notes", name)
#tentry = AtomEntry()
#item = RssItem()
try:
with open(htmlpath, encoding="utf-8") as htmlfile:
soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
# generate atom entry
#entry.author = copy.deepcopy(feed.author) # assume it's always the same author
#entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
#entry.id_text = entry_url
#entry.id = ET.Element("id")
#entry.id.text = entry_url
relpath = "/notes/%s" % name
#entry.link = ET.Element("link", href=entry_url)
title_text = soup.title.text
#entry.title = ET.Element("title", type="html")
#entry.title.text = entry.title_text
post_date = soup.find("meta", attrs={"name": "date"})["content"]
updated_datetime = dateutil.parser.parse(post_date)
date = updated_datetime
if date.year < year:
# close the previous table if there is one
if table_opened:
div_notes_list += u'</table>\n'
# write a new <h2> tag with the smaller year
year = date.year
div_notes_list += u'\n<h2>{0}</h2>\n\n'.format(year)
div_notes_list += u'<table>\n'
table_opened = True
# write a new table row entry, in the format (markup is a reconstruction):
#   <tr><td><time datetime="...">Dec 31</time></td><td><a href="/notes/...">Title</a></td></tr>
monthday = date.strftime("%b %d")
div_notes_list += (u'<tr><td><time datetime="%s">%s</time></td>'
u'<td><a href="%s">%s</a></td></tr>\n' %
(date.isoformat(), monthday, relpath, title_text))
except Exception:
sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
with open(htmlpath, encoding="utf-8") as htmlfile:
sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
raise
if table_opened:
div_notes_list += u'</table>\n'
div_notes_list += u'</div>'
fd, tmppath = tempfile.mkstemp()
os.close(fd)
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/notes/index.html"):
with open("build/notes/index.html", 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/notes/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
def generate_index(feed):
"""Generate index.html from index.md and a TOC."""
sys.stderr.write("generating index.html\n")
# generate TOC
tocbuff = io.StringIO()
tocbuff.write('<div>')  # opening tag assumed; original markup lost
year = 10000 # will be larger than the latest year for quite a while
# recall that entries are in reverse chronological order
table_opened = False
for entry in feed.entries:
date = entry.updated_datetime
if date.year < year:
# close the previous table if there is one
if table_opened:
tocbuff.write(u'</table>\n')
# write a new <h2> tag with the smaller year
year = date.year
tocbuff.write(u'\n<h2>{0}</h2>\n\n'.format(year))
tocbuff.write(u'<table>\n')
table_opened = True
# write a new table row entry, in the format (markup is a reconstruction,
# following the same row shape as generate_notes_list above):
#   <tr><td><time datetime="...">Dec 31</time></td><td><a href="/blog/...">Title</a></td></tr>
monthday = date.strftime("%b %d")
tocbuff.write(u'<tr><td><time datetime="%s">%s</time></td>'
u'<td><a href="%s">%s</a></td></tr>\n' %
(date.isoformat(), monthday, entry.relpath, entry.title_text))
if table_opened:
tocbuff.write(u'</table>\n')
tocbuff.write('</div>')
# create tempfile with index.md and the TOC concatenated, and generate index.html from that
# pylint: disable=invalid-name
fd, tmppath = tempfile.mkstemp()
os.close(fd)
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists(INDEXMD):
with open(INDEXMD, 'r', encoding='utf-8') as indexmd:
tmpfile.write(u"%s\n\n\n\n" % indexmd.read())
tmpfile.write("%s\n" % tocbuff.getvalue())
tocbuff.close()
pandoc_args = [
"pandoc", tmppath,
"--template", HTMLTEMPLATE,
"--highlight-style=pygments",
"-o", INDEXHTML,
]
try:
subprocess.check_call(pandoc_args)
except subprocess.CalledProcessError:
sys.stderr.write("error: failed to generate index.html\n")
os.remove(tmppath)
def make_sitemap_url_element(link, updated=None, changefreq=None, priority=None):
"""Make a sitemap element.
Parameters
----------
link : str or xml.etree.ElementTree.Element
If using an xml.etree.ElementTree.Element element, then it shall
be an atom:link element, e.g., <link href="https://example.com/"/>.
updated : datetime or xml.etree.ElementTree.Element, optional
If using an xml.etree.ElementTree.Element element, then it shall
be an atom:updated element, e.g.,
<updated>2015-05-05T22:38:42-07:00</updated>.
changefreq : {"always", "hourly", "daily", "weekly", "monthly", "yearly", "never"}, optional
priority : {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1}, optional
"""
urlelem = ET.Element("url")
loc = ET.Element("loc")
loc.text = link.attrib["href"] if isinstance(link, ET._Element) else link
urlelem.append(loc)
if updated is not None:
lastmod = ET.Element("lastmod")
lastmod.text = (updated.text if isinstance(updated, ET._Element)
else updated.isoformat())
urlelem.append(lastmod)
if changefreq is not None:
changefreq_elem = ET.Element("changefreq")
changefreq_elem.text = changefreq
urlelem.append(changefreq_elem)
if priority is not None:
priority_elem = ET.Element("priority")
priority_elem.text = "%.1f" % priority
urlelem.append(priority_elem)
return urlelem
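# A quick illustration of the element this builds (hypothetical values,
# assuming the datetime module is imported; ET serialization shown wrapped):
#
#   elem = make_sitemap_url_element("https://example.com/post.html",
#                                   updated=datetime.datetime(2020, 1, 1),
#                                   changefreq="monthly", priority=0.9)
#   ET.tostring(elem)
#   # -> b'<url><loc>https://example.com/post.html</loc>'
#   #    b'<lastmod>2020-01-01T00:00:00</lastmod>'
#   #    b'<changefreq>monthly</changefreq><priority>0.9</priority></url>'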
def generate_sitemap(feed):
"""Generate sitemap.xml."""
sitemap = ET.Element("urlset", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
# index
sitemap.append(make_sitemap_url_element(BLOG_HOME, feed.updated, "daily", 1.0))
# other top level pages
for name in os.listdir(BUILDDIR):
if (not name.endswith(".html") or name == "index.html" or
re.match(r"google[a-z0-9]+\.html", name)): # exclude Google's site ownership verification file
continue
link = urllib.parse.urljoin(BLOG_HOME, name)
fullpath = os.path.join(BUILDDIR, name)
# try to extract updated time
updated = None
with open(fullpath, encoding="utf-8") as htmlobj:
soup = bs4.BeautifulSoup(htmlobj.read(), "lxml")
if soup.footer is not None:
updated_tag = soup.footer.find(attrs={"class": "updated"})
if updated_tag is not None:
updated = dateutil.parser.parse(updated_tag.text)
sitemap.append(make_sitemap_url_element(link, updated, "monthly", 0.9))
# blog entries
for entry in feed.entries:
sitemap.append(make_sitemap_url_element(entry.link, entry.updated, "monthly", 0.9))
sitemappath = os.path.join(BUILDDIR, "sitemap.xml")
with open(sitemappath, "w", encoding="utf-8") as sitemapfile:
sitemapfile.write('<?xml version="1.0" encoding="UTF-8"?>\n%s\n' %
ET.tostring(sitemap).decode('utf-8'))
sys.stderr.write("wrote sitemap.xml\n")
def rewrite_title():
"""Override the title of some page for a better render"""
sys.stderr.write("Overriding some titles\n")
filenames =['build/index.html',
'build/blog/index.html',
'build/notes/index.html']
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if re.search(r'(website|Documents|notes|blog)$', root):
if name.endswith(".html"):
try:
filenames.append("build"+os.path.join(root.split(BUILDDIR)[1], name))
except IndexError:
filenames.append(name)
h1_titles_list = []
h1_title = []
h2_titles_list = []
h2_title = []
fd, tmppath = tempfile.mkstemp()
os.close(fd)
for filename in filenames:
with open(filename, encoding='utf-8') as fileobj:
soup = bs4.BeautifulSoup(fileobj.read(), "lxml")
for myh1 in soup.find_all("h1"):
if re.match("^(?!.*article-title).*$", str(myh1)):
h1_id = myh1['id']
h1_name = myh1.string
h1_title.append(str(myh1))
myh1['class'] = "h1"
myh1.string = ""
h1_span_left = soup.new_tag("span")
h1_span_left['class'] = "left-h1"
h1_span_left.string = "█▓▒░"
h1_span_title = soup.new_tag("span")
h1_span_title['class'] = "title-h1"
h1_span_title.string = "「"+h1_name+"」"
h1_span_right = soup.new_tag("span")
h1_span_right['class'] = "right-h1"
h1_span_right.string = "░▒▓█"
myh1.string.insert_before(h1_span_left)
myh1.span.insert_after(h1_span_right)
myh1.span.insert_after(h1_span_title)
h1_title.append(myh1)
h1_title.append(h1_name)
h1_titles_list.append(h1_title)
h1_title = []
for myh2 in soup.find_all("h2"):
if re.match("^(?!.*article-title).*$", str(myh2)):
h2_id = myh2['id']
h2_name = myh2.string
h2_title.append(str(myh2))
myh2['class'] = "h2"
myh2.string = ""
h2_span_left = soup.new_tag("span")
h2_span_left['class'] = "left-h2"
h2_span_left.string = ".: "
h2_span_title = soup.new_tag("span")
h2_span_title['class'] = "title-h2"
h2_span_title.string = h2_name
h2_span_right = soup.new_tag("span")
h2_span_right['class'] = "right-h2"
h2_span_right.string = " :."
myh2.string.insert_before(h2_span_left)
myh2.span.insert_after(h2_span_right)
myh2.span.insert_after(h2_span_title)
h2_title.append(myh2)
h2_title.append(h2_name)
h2_titles_list.append(h2_title)
h2_title = []
tested_title_list = []
tested_title = []
for filename in filenames:
if os.path.exists(filename):
with open(filename, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open(filename, 'w', encoding='utf-8') as indexmd:
for line in lines:
string = ""
for title in h1_titles_list + h2_titles_list:
if title[0] in line:
string = str(title[1])
if string != "":
indexmd.write(string)
else:
indexmd.write(line)
def absolutify_links(soup, baseurl):
"""Make links in an article absolute.
Parameters
----------
soup : bs4.BeautifulSoup
baseurl : str
"""
for tag in soup.find_all(lambda tag: tag.has_attr("href")):
tag["href"] = urllib.parse.urljoin(baseurl, tag["href"])
for tag in soup.find_all(lambda tag: tag.has_attr("src")):
tag["src"] = urllib.parse.urljoin(baseurl, tag["src"])
def generate_index_and_feed():
"""Generate index.html and feeds (atom and rss)."""
# pylint: disable=too-many-statements,attribute-defined-outside-init,invalid-name
sys.stderr.write("generating atom and rss feeds\n")
# initialize atom feed
feed = AtomFeed()
feed.author = ET.fromstring(
""
"{author}"
"{home}"
"{email}"
"".format(author=AUTHOR, home=BLOG_HOME, email=AUTHOR_EMAIL))
feed.generator = ET.Element("generator", uri=GENERATOR_HOME_PAGE)
feed.generator.text = GENERATOR_NAME
if ATOM_ICON_PATH is not None:
feed.icon = ET.Element("icon")
feed.icon.text = urllib.parse.urljoin(BLOG_HOME, ATOM_ICON_PATH)
feed.id_text = BLOG_HOME
feed.id = ET.Element("id")
feed.id.text = feed.id_text
feed.links = [
ET.Element("link", href=urllib.parse.urljoin(BLOG_HOME, "atom.xml"), rel="self",
type="application/atom+xml"),
ET.Element("link", href=BLOG_HOME, rel="alternate",
type="text/html"),
]
feed.title_text = BLOG_TITLE
feed.title = ET.fromstring("<title>{title}</title>".format(title=BLOG_TITLE))
feed.subtitle_text = BLOG_DESCRIPTION
feed.subtitle = ET.fromstring("<subtitle>{subtitle}</subtitle>"
.format(subtitle=BLOG_DESCRIPTION))
# initialize rss feed
rss = RssFeed()
rss.rssurl = urllib.parse.urljoin(BLOG_HOME, "rss.xml")
rss.title = ET.Element("title")
rss.title.text = BLOG_TITLE
rss.link = ET.Element("link")
rss.link.text = BLOG_HOME
rss.description = ET.Element("description")
rss.description.text = BLOG_DESCRIPTION
rss.language = ET.Element("language")
rss.language.text = LANGUAGE
rss.author_text = "{email} ({name})".format(email=AUTHOR_EMAIL, name=AUTHOR)
rss.managingEditor = ET.Element("managingEditor")
rss.managingEditor.text = rss.author_text
rss.webMaster = ET.Element("webMaster")
rss.webMaster.text = rss.author_text
rss.generator = ET.Element("generator")
rss.generator.text = "{generator} ({url})".format(generator=GENERATOR_NAME,
url=GENERATOR_HOME_PAGE)
rss.image = ET.Element("image")
if RSS_ICON_PATH is not None:
ET.SubElement(rss.image, "url").text = urllib.parse.urljoin(BLOG_HOME, RSS_ICON_PATH)
rss.image.append(copy.deepcopy(rss.title))
rss.image.append(copy.deepcopy(rss.link))
ET.SubElement(rss.image, "width").text = str(RSS_ICON_WIDTH)
ET.SubElement(rss.image, "height").text = str(RSS_ICON_HEIGHT)
# update times will be set after everything finishes
for name in os.listdir(os.path.join(BUILDDIR, "blog")):
if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
htmlpath = os.path.join(BUILDDIR, "blog", name)
entry = AtomEntry()
item = RssItem()
try:
with open(htmlpath, encoding="utf-8") as htmlfile:
soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
# generate atom entry
entry.author = copy.deepcopy(feed.author) # assume it's always the same author
entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
entry.id_text = entry_url
entry.id = ET.Element("id")
entry.id.text = entry_url
entry.relpath = "/blog/%s" % name
entry.link = ET.Element("link", href=entry_url)
entry.title_text = soup.title.text
entry.title = ET.Element("title", type="html")
entry.title.text = entry.title_text
post_date = soup.find("meta", attrs={"name": "date"})["content"]
entry.updated_datetime = dateutil.parser.parse(post_date)
entry.updated = ET.Element("updated")
# pylint: disable=no-member
entry.updated.text = entry.updated_datetime.isoformat()
# process content
tags_to_remove = []
# mark header and footer for removal
article = soup.article
if article.header is not None:
tags_to_remove.append(article.header)
# mark line numbers for removal
for line_number_span in article.find_all("span",
attrs={"class": "line-number"}):
tags_to_remove.append(line_number_span)
# mark script tags for removal
for script_tag in article.find_all("script"):
tags_to_remove.append(script_tag)
# make internal links absolute
absolutify_links(article, entry_url)
# remove marked tags
for tag in tags_to_remove:
tag.extract()
entry.content_html = ''.join([str(content)
for content in article.contents])
entry.content = ET.Element("content", type="html")
entry.content.text = ET.CDATA(entry.content_html)
entry.assemble_entry()
feed.entries.append(entry)
# generate rss item
item.title = ET.Element("title")
item.title.text = entry.title_text
item.link = ET.Element("link")
item.link.text = entry_url
item.description = ET.Element("description")
item.description.text = ET.CDATA(entry.content_html)
item.author = ET.Element("author")
item.author.text = rss.author_text
item.guid = ET.Element("guid", isPermaLink="true")
item.guid.text = item.link.text
item.timestamp = entry.updated_datetime.timestamp()
item.pubDate = ET.Element("pubDate")
item.pubDate.text = email.utils.formatdate(item.timestamp, usegmt=True)
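# RSS wants RFC 2822 dates; for reference,
#   email.utils.formatdate(0, usegmt=True)
# returns 'Thu, 01 Jan 1970 00:00:00 GMT'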
item.assemble_item()
rss.items.append(item)
except Exception:
sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
with open(htmlpath, encoding="utf-8") as htmlfile:
sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
raise
# sort entries by reverse chronological order
feed.entries.sort(key=lambda entry: entry.updated_datetime, reverse=True)
rss.items.sort(key=lambda item: item.timestamp, reverse=True)
generate_index(feed)
generate_menu()
generate_table()
generate_blog_list(feed)
generate_notes_list()
rewrite_title()
feed.updated_datetime = current_datetime()
feed.updated = ET.Element("updated")
feed.updated.text = feed.updated_datetime.isoformat()
rss.update_timestamp = time.time()
rss.pubDate = ET.Element("pubDate")
rss.pubDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)
rss.lastBuildDate = ET.Element("lastBuildDate")
rss.lastBuildDate.text = email.utils.formatdate(rss.update_timestamp, usegmt=True)
with open(ATOM, "w", encoding="utf-8") as atom:
atom.write("%s\n" % feed.dump_feed(FEED_MAX_ENTRIES))
sys.stderr.write("wrote atom.xml\n")
with open(RSS, "w", encoding="utf-8") as rssxml:
rssxml.write("%s\n" % rss.dump_rss(FEED_MAX_ENTRIES))
sys.stderr.write("wrote rss.xml\n")
generate_sitemap(feed)
def _pre_tag_insert_line_numbers(soup, pre_tag):
"""Insert line numbers to a pre tag."""
num_lines = len(pre_tag.text.split("\n"))
for line_number in range(1, num_lines + 1):
# line number spans will look like:
#
#   <span class="line-number" data-line="1" style="top: 0.00em"><!-- --></span>
ln_tag = soup.new_tag("span")
ln_tag["class"] = "line-number"
ln_tag["data-line"] = line_number
ln_tag["style"] = "top: %.2fem" % ((line_number - 1) * 1.35)
# add a comment to the content of the span to suppress tidy5
# empty tag warning
ln_tag.append(soup.new_string("", bs4.Comment))
pre_tag.code.append(ln_tag)
# MARKDOWN EXTENSION!
#
# See docstring of process_image_sizes for documentation.
# If matched, 1st group is width, 3rd group (optional) is height, and
# 4th group is actual text.
IMAGESIZE_EXTRACTOR = re.compile(r'\|(\d+)(x(\d+))?\|\s*(.*)')
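# For instance (hypothetical input):
#
#   IMAGESIZE_EXTRACTOR.match("|1920x1080| Hello, world!").groups()
#   # -> ('1920', 'x1080', '1080', 'Hello, world!')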
def process_image_sizes(soup):
"""Process the image size Markdown extension.
Allows specifying image size in a Markdown image construct
![](). The syntax is:
![|width(xheight)?| alt](src)
where width and height are positive integers (xheight is optional),
and alt is the regular alt string (either plain or with some
Markdown formatting). alt string, as usual, is optional.
Examples:
![|1920x1080| Hello, world!](http://example.com/hello.png)
![|1920| *Hey!*](http://example.com/hey.png)
![|1280x800|](http://example.com/noalt.png)
"""
if not soup.article:
return
for img_tag in soup.article.find_all("img"):
if img_tag.has_attr("alt"):
match = IMAGESIZE_EXTRACTOR.match(img_tag["alt"])
if match:
width, _, height, realalt = match.groups()
img_tag["width"] = width
if height:
img_tag["height"] = height
img_tag["alt"] = realalt
# strip image specs from captions, if any
for caption in soup.article.select(".figure .caption"):
if hasattr(caption, "contents") and isinstance(caption.contents[0], str):
match = IMAGESIZE_EXTRACTOR.match(caption.contents[0])
if match:
caption.contents[0].replace_with(match.group(4))
def link_img_tags(soup):
"""Convert each tag in to a link to its original."""
if not soup.article:
return
for img_tag in soup.article.find_all("img"):
a_tag = soup.new_tag("a", href=img_tag["src"], target="_blank")
a_tag.insert(0, copy.copy(img_tag))
img_tag.replace_with(a_tag)
def process_footnote_backlinks(soup):
"""Add class attribute "footnotes-backlink" to each footnote backlink."""
for footnotes in soup.find_all("div", attrs={"class": "footnotes"}):
for fn_a_tag in footnotes.find_all(lambda tag:
tag.name == "a" and
tag.has_attr("href") and
tag["href"].startswith("#fnref") and
tag.string == "\u21A9"): # U+21A9: LEFTWARDS ARROW WITH HOOK
fn_a_tag["class"] = "footnotes-backlink"
fn_a_tag.string = "\u21A9\uFE0E" # U+FE0E: VARIATION SELECTOR-15
def postprocess_html_file(htmlfilepath):
"""Perform a series of postprocessing to an HTML file."""
with open(htmlfilepath, "r+", encoding="utf-8") as htmlfileobj:
soup = bs4.BeautifulSoup(htmlfileobj.read(), "lxml")
# a series of postprocessing (extensible)
process_image_sizes(soup)
link_img_tags(soup)
process_footnote_backlinks(soup)
# write back
htmlfileobj.seek(0)
htmlfileobj.write(str(soup))
htmlfileobj.truncate()
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
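# A small usage sketch (hypothetical function): the decorator attaches the
# keyword arguments as attributes on the function object itself.
#
#   @static_vars(counter=0)
#   def tick():
#       tick.counter += 1
#       return tick.counter
#
#   tick()  # -> 1
#   tick()  # -> 2; the attribute persists across calls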
# exclude_list is only initialized once to avoid constant disk IO
@static_vars(exclude_list=None)
def generate_blog(fresh=False, report_total_errors=True):
"""Generate the blog in BUILDDIR.
Parameters
----------
fresh : bool
If True, remove all existing build artifacts and start afresh;
otherwise, only copy or build new or modified files. Default is
False.
report_total_errors : bool
If True, a line will be printed to stderr at the end of build
(assuming the function doesn't raise early) reporting the total
number of errors, e.g., "build finished with 0 errors". This is
turned on by default, but pass False to turn it off, which will
result in a completely silent session if nothing changed. This
is useful for auto-regen, for instance.
Returns
-------
failed_builds : int
Number of build failures.
"""
# pylint: disable=too-many-branches,too-many-locals,too-many-statements
if not os.path.isdir(SOURCEDIR):
raise OSError("source directory %s does not exist" % SOURCEDIR)
if not os.path.exists(HTMLTEMPLATE):
raise OSError("HTML template %s not found" % HTMLTEMPLATE)
if not os.path.isdir(BUILDDIR):
if os.path.exists(BUILDDIR):
os.remove(BUILDDIR)
os.mkdir(BUILDDIR, mode=0o755)
if fresh:
for name in os.listdir(BUILDDIR):
if name == ".git":
continue
obj = os.path.join(BUILDDIR, name)
if os.path.isdir(obj):
shutil.rmtree(obj)
else:
os.remove(obj)
# nojekyll: https://help.github.com/articles/files-that-start-with-an-underscore-are-missing/
if not os.path.exists(os.path.join(BUILDDIR, ".nojekyll")):
with open(os.path.join(BUILDDIR, ".nojekyll"), "w") as fileobj:
pass
if CUSTOM_DOMAIN and not os.path.exists(os.path.join(BUILDDIR, "CNAME")):
with open(os.path.join(BUILDDIR, "CNAME"), "w") as fileobj:
fileobj.write(CUSTOM_DOMAIN)
failed_builds = 0
generator_mtime = os.path.getmtime(GENERATORSOURCE)
template_mtime = os.path.getmtime(HTMLTEMPLATE)
fundamental_mtime = max(generator_mtime, template_mtime)
anything_modified = False
exclude_list = generate_blog.exclude_list # get value of static variable
if exclude_list is None:
try:
with open(EXCLUDELIST) as fp:
exclude_list = [os.path.abspath(os.path.join(SOURCEDIR, line.rstrip()))
for line in list(fp)
if line.strip() != "" and not line.startswith('#')]
except OSError:
exclude_list = []
generate_blog.exclude_list = exclude_list # assign to static variable for the future
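# Judging from the parsing above, the exclude list file (EXCLUDELIST) holds
# one source-relative path per line, with blank lines and '#' comments
# skipped. A hypothetical example:
#
#   drafts
#   notes/private.md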
for root, dirs, files in os.walk(SOURCEDIR):
# If root is in exclude list, skip all files and remove all subdirs from traversal list.
if root in exclude_list:
dirs[:] = []
continue
relroot = os.path.relpath(root, start=SOURCEDIR)
dstroot = os.path.join(BUILDDIR, relroot)
if not os.path.isdir(dstroot):
if os.path.exists(dstroot):
os.remove(dstroot)
os.mkdir(dstroot, mode=0o755)
for name in files:
if name.startswith('.') or os.path.join(root, name) in exclude_list:
continue
extension = name.split(".")[-1]
if extension not in ["css", "js", "asc", "html", "jpg", "md", "png", "svg", "ico", "txt",
"eot", "ttf", "woff", "woff2"]:
continue
relpath = os.path.join(relroot, name)
srcpath = os.path.join(root, name)
if extension == "md":
dstpath = os.path.join(dstroot, re.sub(r'\.md$', '.html', name))
else:
dstpath = os.path.join(dstroot, name)
if ((not os.path.exists(dstpath) or
os.path.getmtime(dstpath) <=
max(fundamental_mtime, os.path.getmtime(srcpath)))):
# new post or modified post
anything_modified = True
if srcpath == INDEXMD:
continue # index will be processed separately
if extension in ["css", "js", "asc", "html", "jpg", "png", "svg", "ico", "txt",
"eot", "ttf", "woff", "woff2"]:
sys.stderr.write("copying %s\n" % relpath)
shutil.copy(srcpath, dstpath)
elif extension == "md":
sys.stderr.write("compiling %s\n" % relpath)
pandoc_args = [
"pandoc", srcpath,
"--template", HTMLTEMPLATE,
"--highlight-style=pygments",
"-o", dstpath,
]
try:
subprocess.check_call(pandoc_args)
except subprocess.CalledProcessError:
failed_builds += 1
sys.stderr.write("error: failed to generate %s" %
relpath)
# postprocess generated HTML file
postprocess_html_file(dstpath)
if anything_modified:
generate_index_and_feed()
sys.stderr.write("done\n")
if report_total_errors:
sys.stderr.write("build finished with %d errors\n" % failed_builds)
return failed_builds
def generate(args):
"""Wrapper for generate_blog(fresh=False)."""
# pylint: disable=unused-argument
exit(generate_blog(fresh=False))
def regenerate(args):
"""Wrapper for generate_blog(fresh=True)."""
# pylint: disable=unused-argument
exit(generate_blog(fresh=True))
def sanitize(string):
"""Sanitize string (title) for URI consumption."""
if isinstance(string, bytes):
string = string.decode('utf-8')
# to lowercase
string = string.lower()
# strip all non-word, non-hyphen and non-whitespace characters
string = re.sub(r"[^\w\s-]", "", string)
# replace consecutive whitespaces with a single hyphen
string = re.sub(r"\s+", "-", string)
# percent encode the result
return urllib.parse.quote(string)
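# For example:
#
#   sanitize("Hello, World! Nice day")  # -> 'hello-world-nice-day'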
def edit_post_with_editor(path):
"""Launch text editor to edit post at a given path.
Text editor is $VISUAL, then if empty, $EDITOR, then if still empty,
vi.
"""
if "VISUAL" in os.environ:
editor = os.environ["VISUAL"]
elif "EDITOR" in os.environ:
editor = os.environ["EDITOR"]
else:
editor = "vi"
subprocess.call([editor, path])
def new_post(title):
"""Create a new post with metadata pre-filled.
The path to the new post is printed to stdout.
Returns
-------
0
On success.
"""
date = current_datetime()
filename_date = date.strftime("%Y-%m-%d")
iso_date = date.isoformat()
display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
title_sanitized = sanitize(title)
filename = "%s-%s.md" % (filename_date, title_sanitized)
fullpath = os.path.join(POSTSDIR, filename)
if not os.path.isdir(POSTSDIR):
if os.path.exists(POSTSDIR):
os.remove(POSTSDIR)
os.mkdir(POSTSDIR, mode=0o755)
if os.path.exists(fullpath):
sys.stderr.write("%serror: '%s' already exists, please pick a different title%s\n" %
(RED, fullpath, RESET))
return 1
with open(fullpath, 'w', encoding='utf-8') as newpost:
newpost.write("---\n")
newpost.write('title: "%s"\n' % title)
newpost.write("date: %s\n" % iso_date)
newpost.write("date_display: %s\n" % display_date)
newpost.write("---\n\n")
sys.stderr.write("New post created in:\n")
print(fullpath)
edit_post_with_editor(fullpath)
return 0
def new_post_cli(args):
"""CLI wrapper around new_post."""
new_post(args.title)
def touch(filename):
"""Update the timestamp of a post to the current time."""
filename = os.path.basename(filename)
fullpath = os.path.join(POSTSDIR, filename)
if not os.path.exists(fullpath):
sys.stderr.write("%serror: post %s not found %s\n" %
(RED, fullpath, RESET))
return 1
filename_prefix_re = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}")
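# e.g., with a hypothetical filename:
#
#   filename_prefix_re.sub("2016-01-01", "2015-05-05-hello.md")
#   # -> '2016-01-01-hello.md'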
if not filename_prefix_re.match(filename):
sys.stderr.write(RED)
sys.stderr.write("error: post %s is not a valid post\n" % filename)
sys.stderr.write("error: the filename of a valid post begins with "
"a date in the form xxxx-xx-xx\n")
sys.stderr.write(RESET)
return 1
# update timestamp in the metadata section of the post
whatchanged = io.StringIO()
date = current_datetime()
iso_date = date.isoformat()
display_date = "%s %d, %d" % (date.strftime("%B"), date.day, date.year)
filename_date = date.strftime("%Y-%m-%d")
with fileinput.input(files=(fullpath), inplace=True) as lines:
meta_fences = 0
for line in lines:
if line.startswith("---"):
meta_fences += 1
sys.stdout.write(line)
continue
if meta_fences >= 2:
# already went past the metadata section
sys.stdout.write(line)
continue
if line.startswith("date: "):
updated_line = "date: %s\n" % iso_date
sys.stdout.write(updated_line)
whatchanged.write("-%s+%s\n" % (line, updated_line))
continue
if line.startswith("date_display: "):
updated_line = "date_display: %s\n" % display_date
sys.stdout.write(updated_line)
whatchanged.write("-%s+%s\n" % (line, updated_line))
continue
sys.stdout.write(line)
sys.stderr.write("\n%schangeset:%s\n\n%s" %
(YELLOW, RESET, whatchanged.getvalue()))
whatchanged.close()
# check if the file needs to be renamed
new_filename = filename_prefix_re.sub(filename_date, filename)
if new_filename != filename:
new_fullpath = os.path.join(POSTSDIR, new_filename)
os.rename(fullpath, new_fullpath)
sys.stderr.write("%srenamed to %s%s\n" % (YELLOW, new_filename, RESET))
return 0
def touch_cli(args):
"""CLI wrapper around touch."""
touch(args.filename)
def deploy(args):
"""Deploys build directory to origin/master without regenerating.
Returns
-------
0
On success. Exit early with nonzero status otherwise.
"""
# pylint: disable=unused-argument,too-many-statements
# check whether root is dirty
os.chdir(ROOTDIR)
dirty = subprocess.check_output(["git", "status", "--porcelain"])
if dirty:
sys.stderr.write(YELLOW)
sys.stderr.write("Project root is dirty.\n")
sys.stderr.write("You may want to commit in your changes "
"to the source branch, since the SHA and title "
"of the latest commit on the source branch will be "
"incorporated into the commit message on "
"the deployment branch. Type s[hell] on the "
"next prompt to open an interactive shell.\n")
sys.stderr.write(RESET)
while True:
sys.stderr.write("Continue? [yNs] ")
answer = input()
if not answer:
# default
abort = True
break
elif answer.startswith(('y', 'Y')):
abort = False
break
elif answer.startswith(('n', 'N')):
abort = True
break
elif answer.startswith(('s', 'S')):
shell = (os.environ['SHELL'] if 'SHELL' in os.environ and os.environ['SHELL']
else 'zsh')
subprocess.call(shell)
stilldirty = subprocess.check_output(["git", "status", "--porcelain"])
if stilldirty:
sys.stderr.write(YELLOW)
sys.stderr.write("Project root is still dirty.\n")
sys.stderr.write(RESET)
else:
sys.stderr.write("Please answer yes or no.\n")
if abort:
sys.stderr.write("%saborting deployment%s\n" % (RED, RESET))
return 1
# extract latest commit on the source branch
source_commit = subprocess.check_output(
["git", "log", "-1", "--pretty=oneline", "source", "--"]).decode('utf-8').strip()
# cd into BUILDDIR and assemble commit message
sys.stderr.write("%scommand: cd '%s'%s\n" % (BLUE, BUILDDIR, RESET))
os.chdir(BUILDDIR)
# extract updated time from atom.xml
if not os.path.exists("atom.xml"):
sys.stderr.write("atom.xml not found, cannot deploy\naborting\n")
return 1
atomxml = ET.parse("atom.xml").getroot()
updated = atomxml.find('{http://www.w3.org/2005/Atom}updated').text
commit_message = ("Site updated at %s\n\nsource branch was at:\n%s\n" %
(updated, source_commit))
# commit changes in BUILDDIR
sys.stderr.write("%scommand: git add --all%s\n" % (BLUE, RESET))
subprocess.check_call(["git", "add", "--all"])
sys.stderr.write("%scommand: git commit --no-verify --gpg-sign --message='%s'%s\n" %
(BLUE, commit_message, RESET))
try:
subprocess.check_call(["git", "commit", "--gpg-sign",
"--message=%s" % commit_message])
except subprocess.CalledProcessError:
sys.stderr.write("\n%serror: git commit failed%s\n" % (RED, RESET))
return 1
# check dirty status
dirty = subprocess.check_output(["git", "status", "--porcelain"])
if dirty:
sys.stderr.write(RED)
sys.stderr.write("error: failed to commit all changes; "
"build directory still dirty\n")
sys.stderr.write("error: please manually inspect what was left out\n")
sys.stderr.write(RESET)
return 1
# push to origin/master
sys.stderr.write("%scommand: git push origin master%s\n" % (BLUE, RESET))
try:
subprocess.check_call(["git", "push", "origin", "master"])
except subprocess.CalledProcessError:
sys.stderr.write("\n%serror: git push failed%s\n" % (RED, RESET))
return 1
return 0
def gen_deploy(args):
"""Regenerate and deploy."""
# pylint: disable=unused-argument,too-many-branches
# try to smartly determine the latest post, and prompt to touch it
current_time = time.time()
latest_post = None
latest_postdate = 0
latest_mtime = 0
for name in os.listdir(POSTSDIR):
matchobj = re.match(r"^([0-9]{4})-([0-9]{2})-([0-9]{2})-.*\.md", name)
if not matchobj:
continue
fullpath = os.path.join(POSTSDIR, name)
mtime = os.path.getmtime(fullpath)
# get post date from the date metadata field of the post
postdate = 0
with open(fullpath) as postobj:
for line in postobj:
dateregex = r"^date: (\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}-\d{2}:?\d{2})"
datematch = re.match(dateregex, line.rstrip())
if datematch:
postdate = dateutil.parser.parse(datematch.group(1)).timestamp()
break
# skip the post if it is dated more than three days ago
if current_time - postdate > 3 * 24 * 3600:
continue
if mtime > latest_mtime:
latest_post = name
latest_postdate = postdate
latest_mtime = mtime
# prompt for touching if the latest post determined above was
# modified within the last hour but the date registered in the post
# isn't within the last ten minutes
if ((latest_post is not None and current_time - latest_mtime < 3600 and
current_time - latest_postdate > 600)):
sys.stderr.write("%sIt appears that %s might be a new post.\n"
"Do you want to touch its timestamp?%s\n" %
(GREEN, latest_post, RESET))
while True:
yesnoquit = input("[ynq]: ")
if yesnoquit.startswith(("Y", "y")):
yesno = True
break
elif yesnoquit.startswith(("N", "n")):
yesno = False
break
elif yesnoquit.startswith(("Q", "q")):
sys.stderr.write("%saborting gen_deploy%s\n" % (RED, RESET))
return 1
else:
sys.stderr.write("Please answer yes, no, or quit.\n")
if yesno:
sys.stderr.write("%stouching %s%s\n" % (BLUE, latest_post, RESET))
touch(latest_post)
sys.stderr.write("\n")
generate_blog(fresh=True)
deploy(None)
class HTTPServerProcess(multiprocessing.Process):
"""This class can be used to run an HTTP server."""
def __init__(self, rootdir):
"""Initialize the HTTPServerProcess class.
Parameters
----------
rootdir : str
The root directory to serve from.
"""
super().__init__()
self.rootdir = rootdir
def run(self):
"""Create an HTTP server and serve forever.
Runs on localhost. The default port is 8001; if it is not
available, a random port is used instead.
"""
os.chdir(self.rootdir)
# pylint: disable=invalid-name
HandlerClass = http.server.SimpleHTTPRequestHandler
try:
httpd = http.server.HTTPServer(("", 8001), HandlerClass)
except OSError:
httpd = http.server.HTTPServer(("", 0), HandlerClass)
_, portnumber = httpd.socket.getsockname()
sys.stderr.write("server serving on http://localhost:%d\n" % portnumber)
try:
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
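# Typical use (as in preview below): serve the build directory from a
# child process while the parent keeps working.
#
#   server = HTTPServerProcess(BUILDDIR)
#   server.start()
#   # ... watch for changes, regenerate, etc. ...
#   server.join()  # after the server process exits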
def preview(args):
"""Serve the blog and auto regenerate upon changes."""
# pylint: disable=unused-argument
server_process = HTTPServerProcess(BUILDDIR)
server_process.start()
sys.stderr.write("watching for changes\n")
sys.stderr.write("send SIGINT to stop\n")
# install a SIGINT handler only for this process
sigint_raised = False
def sigint_mitigator(signum, frame):
"""Translate SIGINT to setting the sigint_raised flag."""
nonlocal sigint_raised
sigint_raised = True
signal.signal(signal.SIGINT, sigint_mitigator)
# Watch and auto-regen.
# No need to actually implement watch separately, since
# generate_blog(fresh=False, report_total_errors=False) already
# watches for modifications and only regens upon changes, and it is
# completely silent when there's no change.
while not sigint_raised:
generate_blog(fresh=False, report_total_errors=False)
time.sleep(0.5)
sys.stderr.write("\nSIGINT received, cleaning up...\n")
server_process.join()
return 0
def list_posts():
"""List all posts, with date, title, and path to source file.
This function only lists posts that have been built (since it reads
metadata from HTML rather than Markdown).
Returns
-------
posts : list
A list of posts, in reverse chronological order, where each
element is a tuple of (date, title, path to source file).
"""
posts = []
for name in os.listdir(os.path.join(BUILDDIR, "blog")):
if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
continue
htmlpath = os.path.join(BUILDDIR, "blog", name)
try:
with open(htmlpath, encoding="utf-8") as htmlfile:
soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
title = soup.title.text
date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"])
source_path = os.path.join(POSTSDIR, re.sub(r'.html$', '.md', name))
posts.append((date, title, source_path))
except Exception:
sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name)
with open(htmlpath, encoding="utf-8") as htmlfile:
sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
raise
posts.sort(key=lambda post: post[0], reverse=True)
return posts
class PostSelector:
def __init__(self, term, posts):
self._term = term
self.posts_per_page = term.height - 2
self.pages = [posts[i:i+self.posts_per_page]
for i in range(0, len(posts), self.posts_per_page)]
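# e.g., on a 24-line terminal (22 posts per page), 25 posts chunk into
# pages of lengths [22, 3]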
self.num_pages = len(self.pages)
self.pagepos = 0
self.postpos = 0
self.inserting = False # True if in the middle of inserting a post #, False otherwise
term.enter_fullscreen()
print(term.clear(), end="")
sys.stdout.flush()
self.selection = ""
self.quit = False
self.display_page()
def _clear_to_eol(self):
term = self._term
print(term.clear_eol, end="")
sys.stdout.flush()
def _print_line(self, line, linenum, highlight=False):
term = self._term
width = term.width
with term.location(0, linenum):
if highlight:
print(term.reverse(line[:width]), end="")
else:
print(line[:width], end="")
self._clear_to_eol()
def _print_post(self, page, pos, highlight=False):
if pos >= len(page):
# if position out of range, just clear the line
self._print_line("", pos + 1, highlight)
else:
date, title, path = page[pos]
line = "%3d: %s %s" % (pos, date.strftime("%m/%d/%y"), title)
self._print_line(line, pos + 1, highlight)
def display_page(self):
term = self._term
page = self.pages[self.pagepos]
with term.hidden_cursor():
topline = " PAGE %d/%d POST %d" % (self.pagepos + 1, self.num_pages, self.postpos)
if self.inserting:
topline += term.blink("_")
self._print_line(topline, 0, highlight=True)
for i in range(self.posts_per_page):
self._print_post(page, i)
# highlight selected post
self._print_post(page, self.postpos, highlight=True)
bottomline = " Press h for help."
self._print_line(bottomline, term.height - 1, highlight=True)
def dispatch(self, key):
term = self._term
if key in string.digits:
# insert
if self.inserting:
newpostpos = 10 * self.postpos + int(key)
if newpostpos < len(self.pages[self.pagepos]):
self.postpos = newpostpos
else:
self.postpos = int(key)
self.inserting = True
elif key.name == "KEY_DELETE":
self.postpos //= 10
self.inserting = True
else:
self.inserting = False
if key.name == "KEY_ENTER":
self.selection = self.pages[self.pagepos][self.postpos][2]
if key in {"q", "Q"}:
self.quit = True
elif key.name == "KEY_DOWN" or key in {"n", "N"}:
if self.postpos + 1 < len(self.pages[self.pagepos]):
self.postpos += 1
elif key.name == "KEY_UP" or key in {"p", "P"}:
if self.postpos > 0:
self.postpos -= 1
elif key.name == "KEY_RIGHT" or key in {".", ">"}:
if self.pagepos + 1 < self.num_pages:
self.pagepos += 1
self.postpos = 0
elif key.name == "KEY_LEFT" or key in {",", "<"}:
if self.pagepos > 0:
self.pagepos -= 1
self.postpos = 0
elif key in {"h", "H"}:
print(term.clear_eol, end="")
sys.stdout.flush()
help_text_lines = [
"Next post: n or ",
"Previous post: p or ",
"Next page: . or > or ",
"Previous page: , or < or ",
"Select post: or ",
"Select by number: type number as shown (delete or backspace to edit)",
"Get help: h",
"Quit program: q",
]
for i in range(term.height - 1):
self._print_line(help_text_lines[i] if i < len(help_text_lines) else "", i)
bottomline = " Press any key to continue."
self._print_line(bottomline, term.height - 1, highlight=True)
with term.raw():
term.inkey()
def restore(self):
term = self._term
term.exit_fullscreen()
print(term.clear(), end="")
sys.stdout.flush()
def select(self):
term = self._term
try:
while True:
with term.raw():
self.dispatch(term.inkey())
if self.selection or self.quit:
break
self.display_page()
except Exception:
raise
finally:
self.restore()
return self.selection
def edit_existing_post(args):
selector = PostSelector(blessed.Terminal(), list_posts())
selection = selector.select()
if selection:
print(selection)
edit_post_with_editor(selection)
else:
return 1
def main():
"""CLI interface."""
description = "Simple blog generator in Python with Pandoc as backend."
parser = argparse.ArgumentParser(description=description)
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
parser_new_post = subparsers.add_parser(
"new_post", aliases=["n", "new"],
description="Create a new post with metadata pre-filled.")
parser_new_post.add_argument("title", help="title of the new post")
parser_new_post.set_defaults(func=new_post_cli)
parser_new_post = subparsers.add_parser(
"touch", aliases=["t", "tou"],
description="""Touch an existing post, i.e., update its
timestamp to current time. Why is this ever useful? Well, the
timestamp filled in by new_post is the time of creation, but one
might spend several hours after the creation of the file to
finish the post. Sometimes the post is even created on one day
and finished on another (say created at 11pm and finished at
1am). Therefore, one may want to retouch the timestamp before
publishing.""")
parser_new_post.add_argument("filename",
help="path or basename of the source file, "
"e.g., 2015-05-05-new-blog-new-start.md")
parser_new_post.set_defaults(func=touch_cli)
parser_generate = subparsers.add_parser(
"generate", aliases=["g", "gen"],
description="Generate new or changed objects.")
parser_generate.set_defaults(func=generate)
parser_regenerate = subparsers.add_parser(
"regenerate", aliases=["r", "regen"],
description="Regenerate the entire blog afresh.")
parser_regenerate.set_defaults(func=regenerate)
parser_new_post = subparsers.add_parser(
"preview", aliases=["p", "pre"],
description="Serve the blog locally and auto regenerate upon changes.")
parser_new_post.set_defaults(func=preview)
parser_new_post = subparsers.add_parser(
"deploy", aliases=["d", "dep"],
description="Deploy build/ to origin/master without regenerating.")
parser_new_post.set_defaults(func=deploy)
parser_new_post = subparsers.add_parser(
"gen_deploy", aliases=["gd", "gendep"],
description="Rebuild entire blog and deploy build/ to origin/master.")
parser_new_post.set_defaults(func=gen_deploy)
parser_new_post = subparsers.add_parser(
"edit", aliases=["e", "ed"],
description="Bring up post selector to select post for editing.")
parser_new_post.set_defaults(func=edit_existing_post)
with init_colorama():
args = parser.parse_args()
returncode = args.func(args)
exit(returncode)
if __name__ == '__main__':
main()