-rwxr-xr-x  pyblog          |  40
-rw-r--r--  utils/utils.py  |  41
2 files changed, 42 insertions(+), 39 deletions(-)
diff --git a/pyblog b/pyblog
index ceb018d4..e73059bc 100755
--- a/pyblog
+++ b/pyblog
@@ -53,44 +53,6 @@ from generators import generators
 from cli import cli
-def list_posts():
-    """List all posts, with date, title, and path to source file.
-
-    This function only lists posts that has been built (since it reads
-    metadata from HTML rather than Markdown).
-
-    Returns
-    -------
-    posts : list
-        A list of posts, in reverse chronological order, where each
-        element is a tuple of (date, title, path to source file).
-
-    """
-    posts = []
-    for name in os.listdir(os.path.join(BUILDDIR, "blog")):
-        if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
-            continue
-
-        htmlpath = os.path.join(BUILDDIR, "blog", name)
-        entry = AtomEntry()
-        item = RssItem()
-        try:
-            with open(htmlpath, encoding="utf-8") as htmlfile:
-                soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
-            title = soup.title.text
-            date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"])
-            source_path = os.path.join(POSTSDIR, re.sub(r'.html$', '.md', name))
-            posts.append((date, title, source_path))
-        except Exception:
-            sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name)
-            with open(htmlpath, encoding="utf-8") as htmlfile:
-                sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
-            raise
-
-    posts.sort(key=lambda post: post[0], reverse=True)
-    return posts
-
-
 class PostSelector:
     def __init__(self, term, posts):
@@ -231,7 +193,7 @@ class PostSelector:
 def edit_existing_post(args):
-    selector = PostSelector(blessed.Terminal(), list_posts())
+    selector = PostSelector(blessed.Terminal(), utils.list_posts())
     selection = selector.select()
     if selection:
         print(selection)
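
The only change pyblog itself needs is to reach the helper through the utils module instead of its own local definition. Below is a minimal sketch of the new call pattern, assuming pyblog imports the module as "from utils import utils" (the import line sits outside this hunk); each returned entry is a (date, title, source_path) tuple, newest post first, as documented in the docstring further down.

    # Sketch only: the import spelling is an assumption, not shown in the diff.
    from utils import utils

    for date, title, source_path in utils.list_posts():
        # Newest post first; date is a datetime parsed from the page's meta tag.
        print("%s  %s  (%s)" % (date.date(), title, source_path))
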
diff --git a/utils/utils.py b/utils/utils.py
index 1fdea5d9..f8a756c5 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -19,6 +19,9 @@ import multiprocessing
 import os
 import sys
+from config.config import *
+from rss import *
+
 
 @contextmanager
 def init_colorama():
     """Set global foreground modifying ANSI codes.
@@ -257,3 +260,41 @@ class HTTPServerProcess(multiprocessing.Process):
             httpd.serve_forever()
         except KeyboardInterrupt:
             httpd.shutdown()
+
+
+def list_posts():
+    """List all posts, with date, title, and path to source file.
+
+    This function only lists posts that have been built (since it reads
+    metadata from HTML rather than Markdown).
+
+    Returns
+    -------
+    posts : list
+        A list of posts, in reverse chronological order, where each
+        element is a tuple of (date, title, path to source file).
+
+    """
+    posts = []
+    for name in os.listdir(os.path.join(BUILDDIR, "blog")):
+        if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
+            continue
+
+        htmlpath = os.path.join(BUILDDIR, "blog", name)
+        entry = AtomEntry()
+        item = RssItem()
+        try:
+            with open(htmlpath, encoding="utf-8") as htmlfile:
+                soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
+            title = soup.title.text
+            date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"])
+            source_path = os.path.join(POSTSDIR, re.sub(r'\.html$', '.md', name))
+            posts.append((date, title, source_path))
+        except Exception:
+            sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name)
+            with open(htmlpath, encoding="utf-8") as htmlfile:
+                sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
+            raise
+
+    posts.sort(key=lambda post: post[0], reverse=True)
+    return posts
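
For reference, here is a self-contained sketch of the metadata list_posts() expects every built page to carry: a title element and a meta tag named "date". The HTML fragment is illustrative only; the parsing calls mirror the ones in the function above.

    import bs4
    import dateutil.parser

    # Illustrative page, not taken from the repository.
    html = ('<html><head><title>Hello, world</title>'
            '<meta name="date" content="2018-06-01T10:00:00-07:00">'
            '</head></html>')

    soup = bs4.BeautifulSoup(html, "lxml")
    title = soup.title.text
    date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"])
    print(date, title)  # 2018-06-01 10:00:00-07:00 Hello, world
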