diff options
author | neodarz <neodarz@neodarz.net> | 2019-05-26 18:11:16 +0200 |
---|---|---|
committer | neodarz <neodarz@neodarz.net> | 2019-05-26 18:11:16 +0200 |
commit | 7a9915632b9e451502d9c46407dd58e7deb4640e (patch) | |
tree | 4caac7feaa77ab87c4d0273e044fb7f4cf00ccf6 | |
parent | 6a743a7ef8920cca31b833d26a093b84832b0362 (diff) | |
download | my_new_personal_website-7a9915632b9e451502d9c46407dd58e7deb4640e.tar.xz my_new_personal_website-7a9915632b9e451502d9c46407dd58e7deb4640e.zip |
Move list_posts to external file
-rwxr-xr-x | pyblog | 40 | ||||
-rw-r--r-- | utils/utils.py | 41 |
2 files changed, 42 insertions, 39 deletions
@@ -53,44 +53,6 @@ from generators import generators from cli import cli -def list_posts(): - """List all posts, with date, title, and path to source file. - - This function only lists posts that have been built (since it reads - metadata from HTML rather than Markdown). - - Returns - ------- - posts : list - A list of posts, in reverse chronological order, where each - element is a tuple of (date, title, path to source file). - - """ - posts = [] - for name in os.listdir(os.path.join(BUILDDIR, "blog")): - if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name): - continue - - htmlpath = os.path.join(BUILDDIR, "blog", name) - entry = AtomEntry() - item = RssItem() - try: - with open(htmlpath, encoding="utf-8") as htmlfile: - soup = bs4.BeautifulSoup(htmlfile.read(), "lxml") - title = soup.title.text - date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"]) - source_path = os.path.join(POSTSDIR, re.sub(r'.html$', '.md', name)) - posts.append((date, title, source_path)) - except Exception: - sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name) - with open(htmlpath, encoding="utf-8") as htmlfile: - sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read()) - raise - - posts.sort(key=lambda post: post[0], reverse=True) - return posts - - class PostSelector: def __init__(self, term, posts): @@ -231,7 +193,7 @@ class PostSelector: def edit_existing_post(args): - selector = PostSelector(blessed.Terminal(), list_posts()) + selector = PostSelector(blessed.Terminal(), utils.list_posts()) selection = selector.select() if selection: print(selection) diff --git a/utils/utils.py b/utils/utils.py index 1fdea5d9..f8a756c5 100644 --- a/utils/utils.py +++ b/utils/utils.py @@ -19,6 +19,9 @@ import multiprocessing import os import sys +from config.config import * +from rss import * + @contextmanager def init_colorama(): """Set global foreground modifying ANSI codes. 
@@ -257,3 +260,41 @@ class HTTPServerProcess(multiprocessing.Process): httpd.serve_forever() except KeyboardInterrupt: httpd.shutdown() + + +def list_posts(): + """List all posts, with date, title, and path to source file. + + This function only lists posts that have been built (since it reads + metadata from HTML rather than Markdown). + + Returns + ------- + posts : list + A list of posts, in reverse chronological order, where each + element is a tuple of (date, title, path to source file). + + """ + posts = [] + for name in os.listdir(os.path.join(BUILDDIR, "blog")): + if not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name): + continue + + htmlpath = os.path.join(BUILDDIR, "blog", name) + entry = AtomEntry() + item = RssItem() + try: + with open(htmlpath, encoding="utf-8") as htmlfile: + soup = bs4.BeautifulSoup(htmlfile.read(), "lxml") + title = soup.title.text + date = dateutil.parser.parse(soup.find("meta", attrs={"name": "date"})["content"]) + source_path = os.path.join(POSTSDIR, re.sub(r'.html$', '.md', name)) + posts.append((date, title, source_path)) + except Exception: + sys.stderr.write("error: failed to read metadata from HTML file %s\n" % name) + with open(htmlpath, encoding="utf-8") as htmlfile: + sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read()) + raise + + posts.sort(key=lambda post: post[0], reverse=True) + return posts |