-rw-r--r--   generators/generators.py   89
-rwxr-xr-x   pyblog                     91
2 files changed, 90 insertions, 90 deletions
diff --git a/generators/generators.py b/generators/generators.py
index 1247a1c5..1792fa57 100644
--- a/generators/generators.py
+++ b/generators/generators.py
@@ -5,6 +5,9 @@ import sys
import tempfile
import re
+import bs4
+import dateutil.parser
+
from config.config import *
@@ -195,3 +198,89 @@ def generate_blog_list(feed):
with open("build/blog/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate blog_list here %}', div_blog_list, line))
+
+
+def generate_notes_list():
+ """"Generate notes list """
+
+ sys.stderr.write("generating notes list\n")
+
+ html_fileList = []
+ for root, dirs, files in os.walk(BUILDDIR):
+ for name in files:
+ if re.search(r'notes',root):
+ if name.endswith(".html"):
+ try:
+ html_fileList.append(os.path.join(root.split('notes/')[1], name))
+ except IndexError:
+ html_fileList.append(name)
+
+ div_notes_list = u'<div class="blog-index" id="toc">\n</table>\n'
+ year = 10000 # will be larger than the latest year for quite a while
+ # recall that entries are in reverse chronological order
+ table_opened = False
+ for name in list(reversed(sorted(os.listdir(os.path.join(BUILDDIR, "notes"))))):
+ if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
+ htmlpath = os.path.join(BUILDDIR, "notes", name)
+ #tentry = AtomEntry()
+ #item = RssItem()
+ try:
+ with open(htmlpath, encoding="utf-8") as htmlfile:
+ soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
+ # generate atom entry
+ #entry.author = copy.deepcopy(feed.author) # assume it's always the same author
+ #entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
+ #entry.id_text = entry_url
+ #entry.id = ET.Element("id")
+ #entry.id.text = entry_url
+ relpath = "/notes/%s" % name
+
+ #entry.link = ET.Element("link", href=entry_url)
+ title_text = soup.title.text
+
+ #entry.title = ET.Element("title", type="html")
+ #entry.title.text = entry.title_text
+ post_date = soup.find("meta", attrs={"name": "date"})["content"]
+ updated_datetime = dateutil.parser.parse(post_date)
+
+ date = updated_datetime
+ if date.year < year:
+ # close the previous table if there is one
+ if table_opened:
+ div_notes_list += u'</table>\n'
+ # write a new <h2 class="blog-index-year-title"> tag with the smaller year
+ year = date.year
+ div_notes_list += u'\n<h2 class="blog-index-year-title" id="{0}"><span class="left-h2">.:</span><span class="title-h2">{0}</span><span class="right-h2">:.</span></h2>\n\n'.format(year)
+ div_notes_list += u'<table class="blog-index-yearly-index">\n'
+ table_opened = True
+
+ # write a new table row entry in Markdown, in the format:
+ #
+ # <tr>
+ # <td class="blog-index-post-date"><time class="date" datetime="2015-05-05T00:06:04-0700">May 5</time></td>
+ # <td class="blog-index-post-title">[Blah blah](/blog/2015-05-04-blah-blah.html)</td>
+ # </tr>
+ monthday = date.strftime("%b %d")
+ div_notes_list += (u'<tr><td class="blog-index-post-date"><time class="date" datetime="%s">%s</time></td>'
+ '<td class="blog-index-post-title"><a href="%s">%s</a></td></tr>\n' %
+ (date.isoformat(), monthday, relpath, title_text))
+
+ except Exception:
+ sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
+ with open(htmlpath, encoding="utf-8") as htmlfile:
+ sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
+ raise
+
+ if table_opened:
+ div_notes_list += u'</table>\n'
+ div_notes_list += u'</div>'
+
+ fd, tmppath = tempfile.mkstemp()
+ os.close(fd)
+ with open(tmppath, 'w', encoding='utf-8') as tmpfile:
+ if os.path.exists("build/notes/index.html"):
+ with open("build/notes/index.html", 'r', encoding='utf-8') as indexmd:
+ lines = indexmd.readlines()
+ with open("build/notes/index.html", 'w', encoding='utf-8') as indexmd:
+ for line in lines:
+ indexmd.write(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
diff --git a/pyblog b/pyblog
index c27edd1b..d1e83ba0 100755
--- a/pyblog
+++ b/pyblog
@@ -51,95 +51,6 @@ from config.config import *
from generators import generators
-def generate_notes_list():
- """"Generate notes list """
-
- sys.stderr.write("generating notes list\n")
-
- html_fileList = []
- for root, dirs, files in os.walk(BUILDDIR):
- for name in files:
- if re.search(r'notes',root):
- if name.endswith(".html"):
- try:
- html_fileList.append(os.path.join(root.split('notes/')[1], name))
- except IndexError:
- html_fileList.append(name)
-
- div_notes_list = u'<div class="blog-index" id="toc">\n</table>\n'
- year = 10000 # will be larger than the latest year for quite a while
- # recall that entries are in reverse chronological order
- table_opened = False
- for name in list(reversed(sorted(os.listdir(os.path.join(BUILDDIR, "notes"))))):
- if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
- htmlpath = os.path.join(BUILDDIR, "notes", name)
- #tentry = AtomEntry()
- #item = RssItem()
- try:
- with open(htmlpath, encoding="utf-8") as htmlfile:
- soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
- # generate atom entry
- #entry.author = copy.deepcopy(feed.author) # assume it's always the same author
- #entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
- #entry.id_text = entry_url
- #entry.id = ET.Element("id")
- #entry.id.text = entry_url
- relpath = "/notes/%s" % name
-
- #entry.link = ET.Element("link", href=entry_url)
- title_text = soup.title.text
-
- #entry.title = ET.Element("title", type="html")
- #entry.title.text = entry.title_text
- post_date = soup.find("meta", attrs={"name": "date"})["content"]
- updated_datetime = dateutil.parser.parse(post_date)
-
- date = updated_datetime
- if date.year < year:
- # close the previous table if there is one
- if table_opened:
- div_notes_list += u'</table>\n'
- # write a new <h2 class="blog-index-year-title"> tag with the smaller year
- year = date.year
- div_notes_list += u'\n<h2 class="blog-index-year-title" id="{0}"><span class="left-h2">.:</span><span class="title-h2">{0}</span><span class="right-h2">:.</span></h2>\n\n'.format(year)
- div_notes_list += u'<table class="blog-index-yearly-index">\n'
- table_opened = True
-
- # write a new table row entry in Markdown, in the format:
- #
- # <tr>
- # <td class="blog-index-post-date"><time class="date" datetime="2015-05-05T00:06:04-0700">May 5</time></td>
- # <td class="blog-index-post-title">[Blah blah](/blog/2015-05-04-blah-blah.html)</td>
- # </tr>
- monthday = date.strftime("%b %d")
- div_notes_list += (u'<tr><td class="blog-index-post-date"><time class="date" datetime="%s">%s</time></td>'
- '<td class="blog-index-post-title"><a href="%s">%s</a></td></tr>\n' %
- (date.isoformat(), monthday, relpath, title_text))
-
- except Exception:
- sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
- with open(htmlpath, encoding="utf-8") as htmlfile:
- sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
- raise
-
- if table_opened:
- div_notes_list += u'</table>\n'
- div_notes_list += u'</div>'
-
- fd, tmppath = tempfile.mkstemp()
- os.close(fd)
- with open(tmppath, 'w', encoding='utf-8') as tmpfile:
- if os.path.exists("build/notes/index.html"):
- with open("build/notes/index.html", 'r', encoding='utf-8') as indexmd:
- lines = indexmd.readlines()
- with open("build/notes/index.html", 'w', encoding='utf-8') as indexmd:
- for line in lines:
- indexmd.write(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
-
-
-
-
-
def generate_index(feed):
"""Generate index.html from index.md and a TOC."""
@@ -507,7 +418,7 @@ def generate_index_and_feed():
    generators.generate_menu()
    generators.generate_table()
    generators.generate_blog_list(feed)
-    generate_notes_list()
+    generators.generate_notes_list()
    rewrite_title()
    feed.updated_datetime = utils.current_datetime()