author     neodarz <neodarz@neodarz.net>  2019-05-26 15:42:58 +0200
committer  neodarz <neodarz@neodarz.net>  2019-05-26 15:42:58 +0200
commit     60e70f9e2b4b14ea5533531f7a30510866601b44 (patch)
tree       212d5cf01f4a19298e1b29a66aa4faa1e08f263f /generators
parent     f2d9f3041e857fe08359647671edfd1d75718ff1 (diff)
download   my_new_personal_website-60e70f9e2b4b14ea5533531f7a30510866601b44.tar.xz
           my_new_personal_website-60e70f9e2b4b14ea5533531f7a30510866601b44.zip
Move generate_notes_list to external file
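
Since generate_notes_list() now lives in generators/generators.py, callers import it from there instead of defining it inline. A minimal sketch of how the build script might invoke it after this move (the calling module is an assumption, not part of this diff):

    # hypothetical caller, e.g. the site build script
    from generators.generators import generate_notes_list

    generate_notes_list()  # rewrites build/notes/index.html in place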
Diffstat (limited to 'generators')
-rw-r--r--  generators/generators.py  89
1 file changed, 89 insertions, 0 deletions
diff --git a/generators/generators.py b/generators/generators.py
index 1247a1c5..1792fa57 100644
--- a/generators/generators.py
+++ b/generators/generators.py
@@ -5,6 +5,9 @@ import sys
import tempfile
import re
+import bs4
+import dateutil.parser
+
from config.config import *
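
The two new imports back the HTML and date handling added below: bs4 parses each generated note page, and dateutil.parser turns its date meta tag into a datetime. A standalone sketch of that pattern, with illustrative sample HTML (not taken from the repository):

    import bs4
    import dateutil.parser

    html = ('<html><head><title>Blah blah</title>'
            '<meta name="date" content="2019-05-26T15:42:58+0200"></head></html>')
    soup = bs4.BeautifulSoup(html, "lxml")
    post_date = soup.find("meta", attrs={"name": "date"})["content"]
    updated_datetime = dateutil.parser.parse(post_date)  # timezone-aware datetime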
@@ -195,3 +198,89 @@ def generate_blog_list(feed):
with open("build/blog/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate blog_list here %}', div_blog_list, line))
+
+
+def generate_notes_list():
+    """Generate notes list."""
+
+ sys.stderr.write("generating notes list\n")
+
+ html_fileList = []
+ for root, dirs, files in os.walk(BUILDDIR):
+ for name in files:
+            if re.search(r'notes', root):
+ if name.endswith(".html"):
+ try:
+ html_fileList.append(os.path.join(root.split('notes/')[1], name))
+ except IndexError:
+ html_fileList.append(name)
+
+    div_notes_list = u'<div class="blog-index" id="toc">\n'
+ year = 10000 # will be larger than the latest year for quite a while
+ # recall that entries are in reverse chronological order
+ table_opened = False
+ for name in list(reversed(sorted(os.listdir(os.path.join(BUILDDIR, "notes"))))):
+ if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
+ htmlpath = os.path.join(BUILDDIR, "notes", name)
+ #tentry = AtomEntry()
+ #item = RssItem()
+ try:
+ with open(htmlpath, encoding="utf-8") as htmlfile:
+ soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
+ # generate atom entry
+ #entry.author = copy.deepcopy(feed.author) # assume it's always the same author
+ #entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
+ #entry.id_text = entry_url
+ #entry.id = ET.Element("id")
+ #entry.id.text = entry_url
+ relpath = "/notes/%s" % name
+
+ #entry.link = ET.Element("link", href=entry_url)
+ title_text = soup.title.text
+
+ #entry.title = ET.Element("title", type="html")
+ #entry.title.text = entry.title_text
+ post_date = soup.find("meta", attrs={"name": "date"})["content"]
+ updated_datetime = dateutil.parser.parse(post_date)
+
+ date = updated_datetime
+ if date.year < year:
+ # close the previous table if there is one
+ if table_opened:
+ div_notes_list += u'</table>\n'
+ # write a new <h2 class="blog-index-year-title"> tag with the smaller year
+ year = date.year
+ div_notes_list += u'\n<h2 class="blog-index-year-title" id="{0}"><span class="left-h2">.:</span><span class="title-h2">{0}</span><span class="right-h2">:.</span></h2>\n\n'.format(year)
+ div_notes_list += u'<table class="blog-index-yearly-index">\n'
+ table_opened = True
+
+                # write a new table row entry in HTML, in the format:
+ #
+ # <tr>
+ # <td class="blog-index-post-date"><time class="date" datetime="2015-05-05T00:06:04-0700">May 5</time></td>
+                # <td class="blog-index-post-title"><a href="/notes/2015-05-04-blah-blah.html">Blah blah</a></td>
+ # </tr>
+ monthday = date.strftime("%b %d")
+ div_notes_list += (u'<tr><td class="blog-index-post-date"><time class="date" datetime="%s">%s</time></td>'
+ '<td class="blog-index-post-title"><a href="%s">%s</a></td></tr>\n' %
+ (date.isoformat(), monthday, relpath, title_text))
+
+ except Exception:
+                sys.stderr.write("error: failed to generate notes list entry from %s\n" % name)
+ with open(htmlpath, encoding="utf-8") as htmlfile:
+ sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
+ raise
+
+ if table_opened:
+ div_notes_list += u'</table>\n'
+ div_notes_list += u'</div>'
+
+ fd, tmppath = tempfile.mkstemp()
+ os.close(fd)
+ with open(tmppath, 'w', encoding='utf-8') as tmpfile:
+ if os.path.exists("build/notes/index.html"):
+ with open("build/notes/index.html", 'r', encoding='utf-8') as indexmd:
+ lines = indexmd.readlines()
+ with open("build/notes/index.html", 'w', encoding='utf-8') as indexmd:
+ for line in lines:
+ indexmd.write(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
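
The final loop only changes build/notes/index.html if the pre-built page contains the literal placeholder {% generate notes_list here %}; each line is passed through re.sub and the placeholder is swapped for the generated list. A small sketch of that substitution on a hypothetical template line (the surrounding markup is an assumption):

    import re

    div_notes_list = '<div class="blog-index" id="toc">...</div>'
    line = '<main>{% generate notes_list here %}</main>\n'
    print(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
    # -> <main><div class="blog-index" id="toc">...</div></main>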