#!/bin/python3 from contextlib import contextmanager import colorama import time import datetime import dateutil.tz import bs4 import urllib.parse import re import lxml.etree as ET @contextmanager def init_colorama(): """Set global foreground modifying ANSI codes. BLACK, BLUE, CYAN, GREEN, MAGENTA, RED, WHITE, YELLOW, and RESET. """ # pylint: disable=exec-used,invalid-name colorama.init() for color, ansi in colorama.Fore.__dict__.items(): exec("global {0}; {0} = '{1}'".format(color, ansi)) yield for color in colorama.Fore.__dict__: exec("global {0}; {0} = ''".format(color)) colorama.deinit() def current_datetime(): """Return the current datetime, complete with tzinfo. Precision is one second. Timezone is the local timezone. """ return datetime.datetime.fromtimestamp(round(time.time()), dateutil.tz.tzlocal()) def absolutify_links(soup, baseurl): """Make links in an article absolute. Parameters ---------- soup : bs4.BeautifulSoup baseurl : str """ for tag in soup.find_all(lambda tag: tag.has_attr("href")): tag["href"] = urllib.parse.urljoin(baseurl, tag["href"]) for tag in soup.find_all(lambda tag: tag.has_attr("src")): tag["src"] = urllib.parse.urljoin(baseurl, tag["src"]) # MARKDOWN EXTENSION! # # See docstring of process_image_sizes for documentation. # If matched, 1st group is width, 3rd group (optional) is height, and # 4th group is actual text. IMAGESIZE_EXTRACTOR = re.compile(r'\|(\d+)(x(\d+))?\|\s*(.*)') def process_image_sizes(soup): """Process the image size Markdown extension. Allows specifying image size in a Markdown image construct ![](). The syntax is: ![|width(xheight)?| alt](src) where width and height are positive integers (xheight is optional), and alt is the regular alt string (either plain or with some Markdown formatting). alt string, as usual, is optional. Examples: ![|1920x1080| Hello, world!](http://example.com/hello.png) ![|1920| *Hey!*](http://example.com/hey.png) ![|1280x800|](http://example.com/noalt.png) """ if not soup.article: return for img_tag in soup.article.find_all("img"): if img_tag.has_attr("alt"): match = IMAGESIZE_EXTRACTOR.match(img_tag["alt"]) if match: width, _, height, realalt = match.groups() img_tag["width"] = width if height: img_tag["height"] = height img_tag["alt"] = realalt # strip image specs from captions, if any for caption in soup.article.select(".figure .caption"): if hasattr(caption, "contents") and isinstance(caption.contents[0], str): match = IMAGESIZE_EXTRACTOR.match(caption.contents[0]) if match: caption.contents[0].replace_with(match.group(4)) def make_sitemap_url_element(link, updated=None, changefreq=None, priority=None): """Make a sitemap element. Parameters ---------- link : str or xml.etree.ElementTree.Element If using an xml.etree.ElementTree.Element element, then it shall be an atom:link element, e.g., . updated : datetime or xml.etree.ElementTree.Element, optional If using an xml.etree.ElementTree.Element element, then it shall be an atom:updated element, e.g., 2015-05-05T22:38:42-07:00. changefreq : {"always", "hourly", "daily", "weekly", "monthly", "yearly", "never"}, optional priority : {1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1}, optional """ urlelem = ET.Element("url") loc = ET.Element("loc") loc.text = link.attrib["href"] if isinstance(link, ET._Element) else link urlelem.append(loc) if updated is not None: lastmod = ET.Element("lastmod") lastmod.text = (updated.text if isinstance(updated, ET._Element) else updated.isoformat()) urlelem.append(lastmod) if changefreq is not None: changefreq_elem = ET.Element("changefreq") changefreq_elem.text = changefreq urlelem.append(changefreq_elem) if priority is not None: priority_elem = ET.Element("priority") priority_elem.text = "%.1f" % priority urlelem.append(priority_elem) return urlelem def link_img_tags(soup): """Convert each tag in
to a link to its original.""" if not soup.article: return for img_tag in soup.article.find_all("img"): a_tag = soup.new_tag("a", href=img_tag["src"], target="_blank") a_tag.insert(0, copy.copy(img_tag)) img_tag.replace_with(a_tag) def _pre_tag_insert_line_numbers(soup, pre_tag): """Insert line numbers to a pre tag.""" num_lines = len(pre_tag.text.split("\n")) for line_number in range(1, num_lines + 1): # line number divs will look like: # # ln_tag = soup.new_tag("span") ln_tag["class"] = "line-number" ln_tag["data-line"] = line_number ln_tag["style"] = "top: %.2fem" % ((line_number - 1) * 1.35) # add a comment to the content of the span to suppress tidy5 # empty tag warning ln_tag.append(soup.new_string("", bs4.Comment)) pre_tag.code.append(ln_tag) def process_footnote_backlinks(soup): """Add class attribute "footnotes-backlink" to each footnote backlink.""" for footnotes in soup.find_all("div", attrs={"class": "footnotes"}): for fn_a_tag in footnotes.find_all(lambda tag: tag.name == "a" and tag.has_attr("href") and tag["href"].startswith("#fnref") and tag.string == "\u21A9"): # U+21A9: LEFTWARDS ARROW WITH HOOK fn_a_tag["class"] = "footnotes-backlink" fn_a_tag.string = "\u21A9\uFE0E" # U+FE0E: VARIATION SELECTOR-15 def postprocess_html_file(htmlfilepath): """Perform a series of postprocessing to an HTML file.""" with open(htmlfilepath, "r+", encoding="utf-8") as htmlfileobj: soup = bs4.BeautifulSoup(htmlfileobj.read(), "lxml") # a series of postprocessing (extensible) process_image_sizes(soup) link_img_tags(soup) process_footnote_backlinks(soup) # write back htmlfileobj.seek(0) htmlfileobj.write(str(soup)) htmlfileobj.truncate()