From 46c7d47ff182e2d0fe831f3546041487a8952de0 Mon Sep 17 00:00:00 2001
From: neodarz <neodarz@neodarz.net>
Date: Sun, 26 May 2019 16:00:44 +0200
Subject: Move rewrite_title to external file

---
 generators/generators.py | 113 +++++++++++++++++++++++++++++++++++++++
 pyblog                   | 135 +----------------------------------------------
 2 files changed, 114 insertions(+), 134 deletions(-)

diff --git a/generators/generators.py b/generators/generators.py
index 8887bddb..9ad773c2 100644
--- a/generators/generators.py
+++ b/generators/generators.py
@@ -383,3 +383,116 @@ def generate_sitemap(feed):
         sitemapfile.write('<?xml version="1.0" encoding="UTF-8"?>\n%s\n' %
                           ET.tostring(sitemap).decode('utf-8'))
         sys.stderr.write("wrote sitemap.xml\n")
+
+
+def rewrite_title():
+    """Override the title of some page for a better render"""
+    sys.stderr.write("Overriding some titles\n")
+
+    filenames = ['build/index.html',
+                 'build/blog/index.html',
+                 'build/notes/index.html']
+
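+    # Collect every generated HTML page that lives in one of the
+    # sections whose headings must be decorated.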
+    for root, dirs, files in os.walk(BUILDDIR):
+        for name in files:
+            if (re.search(r'(website|Documents|notes|blog)$', root)
+                    and name.endswith(".html")):
+                try:
+                    filenames.append("build" + os.path.join(root.split(BUILDDIR)[1], name))
+                except IndexError:
+                    filenames.append(name)
+
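+    # Each *_titles_list entry holds [original markup, decorated tag,
+    # title text] for one heading.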
+    h1_titles_list = []
+    h1_title = []
+
+    h2_titles_list = []
+    h2_title = []
+
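+    # First pass: parse every page and build the decorated replacement
+    # for each heading.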
+    for filename in filenames:
+        soup = bs4.BeautifulSoup(open(filename), "lxml")
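+        # Decorate every <h1> except article titles with framing spans.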
+        for myh1 in soup.find_all("h1"):
+            if re.match("^(?!.*article-title).*$", str(myh1)):
+                h1_name = myh1.string
+
+                h1_title.append(str(myh1))
+
+
+                myh1['class'] = "h1"
+                myh1.string = ""
+
+                h1_span_left = soup.new_tag("span")
+                h1_span_left['class'] = "left-h1"
+                h1_span_left.string = "█▓▒░"
+
+                h1_span_title = soup.new_tag("span")
+                h1_span_title['class'] = "title-h1"
+                h1_span_title.string = "「"+h1_name+"」"
+
+                h1_span_right = soup.new_tag("span")
+                h1_span_right['class'] = "right-h1"
+                h1_span_right.string = "░▒▓█"
+
+                myh1.string.insert_before(h1_span_left)
+                myh1.span.insert_after(h1_span_right)
+                myh1.span.insert_after(h1_span_title)
+
+                h1_title.append(myh1)
+                h1_title.append(h1_name)
+
+                h1_titles_list.append(h1_title)
+            h1_title = []
+
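+        # Same treatment for every <h2> except the blog index year titles.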
+        for myh2 in soup.find_all("h2"):
+            if re.match("^(?!.*blog-index-year-title).*$", str(myh2)):
+                h2_name = myh2.string
+
+                h2_title.append(str(myh2))
+
+
+                myh2['class'] = "h2"
+                myh2.string = ""
+
+                h2_span_left = soup.new_tag("span")
+                h2_span_left['class'] = "left-h2"
+                h2_span_left.string = ".: "
+
+                h2_span_title = soup.new_tag("span")
+                h2_span_title['class'] = "title-h2"
+                h2_span_title.string = h2_name
+
+                h2_span_right = soup.new_tag("span")
+                h2_span_right['class'] = "right-h2"
+                h2_span_right.string = " :."
+
+                myh2.string.insert_before(h2_span_left)
+                myh2.span.insert_after(h2_span_right)
+                myh2.span.insert_after(h2_span_title)
+
+                h2_title.append(myh2)
+                h2_title.append(h2_name)
+
+                h2_titles_list.append(h2_title)
+            h2_title = []
+
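+    # Second pass: write the decorated headings back into each file.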
+    for filename in filenames:
+        if os.path.exists(filename):
+            with open(filename, 'r', encoding='utf-8') as indexmd:
+                lines = indexmd.readlines()
+
+            with open(filename, 'w', encoding='utf-8') as indexmd:
+                for line in lines:
+                    # Each title[0] is raw HTML, so match it as a plain
+                    # substring rather than as a regular expression.
+                    for title in h1_titles_list + h2_titles_list:
+                        if title[0] in line:
+                            line = line.replace(title[0], str(title[1]))
+                    indexmd.write(line)
diff --git a/pyblog b/pyblog
index 97a88470..11ea868b 100755
--- a/pyblog
+++ b/pyblog
@@ -51,139 +51,6 @@ from config.config import *
 from generators import generators
 
 
-def rewrite_title():
-    """Override the title of some page for a better render"""
-    sys.stderr.write("Overriding some titles\n")
-
-    filenames =['build/index.html',
-                'build/blog/index.html',
-                'build/notes/index.html']
-
-    for root, dirs, files in os.walk(BUILDDIR):
-        for name in files:
-            if re.search(r'website($)',root):
-                if name.endswith(".html"):
-                    try:
-                        filenames.append("build"+os.path.join(root.split(BUILDDIR)[1], name))
-                    except IndexError:
-                        filenames.append(name)
-            if re.search(r'Documents($)',root):
-                if name.endswith(".html"):
-                    try:
-                        filenames.append("build"+os.path.join(root.split(BUILDDIR)[1], name))
-                    except IndexError:
-                        filenames.append(name)
-            if re.search(r'notes($)',root):
-                if name.endswith(".html"):
-                    try:
-                        filenames.append("build"+os.path.join(root.split(BUILDDIR)[1], name))
-                    except IndexError:
-                        filenames.append(name)
-            if re.search(r'blog($)',root):
-                if name.endswith(".html"):
-                    try:
-                        filenames.append("build"+os.path.join(root.split(BUILDDIR)[1], name))
-                    except IndexError:
-                        filenames.append(name)
-
-    h1_titles_list = []
-    h1_title = []
-
-    h2_titles_list = []
-    h2_title = []
-
-    fd, tmppath = tempfile.mkstemp()
-    os.close(fd)
-    for filename in filenames:
-        soup = bs4.BeautifulSoup(open(filename), "lxml")
-        for myh1 in soup.find_all("h1"):
-            if re.match("^(?!.*article-title).*$", str(myh1)):
-                h1_id = myh1['id']
-                h1_name = myh1.string
-
-                h1_title.append(str(myh1))
-
-
-                myh1['class'] = "h1"
-                myh1.string = ""
-
-                h1_span_left = soup.new_tag("span")
-                h1_span_left['class'] = "left-h1"
-                h1_span_left.string = "█▓▒░"
-
-                h1_span_title = soup.new_tag("span")
-                h1_span_title['class'] = "title-h1"
-                h1_span_title.string = "「"+h1_name+"」"
-
-                h1_span_right = soup.new_tag("span")
-                h1_span_right['class'] = "right-h1"
-                h1_span_right.string = "░▒▓█"
-
-                myh1.string.insert_before(h1_span_left)
-                myh1.span.insert_after(h1_span_right)
-                myh1.span.insert_after(h1_span_title)
-
-                h1_title.append(myh1)
-                h1_title.append(h1_name)
-
-                h1_titles_list.append(h1_title)
-            h1_title = []
-
-        for myh2 in soup.find_all("h2"):
-            if re.match("^(?!.*blog-index-year-title).*$", str(myh2)):
-                h2_id = myh2['id']
-                h2_name = myh2.string
-
-                h2_title.append(str(myh2))
-
-
-                myh2['class'] = "h2"
-                myh2.string = ""
-
-                h2_span_left = soup.new_tag("span")
-                h2_span_left['class'] = "left-h2"
-                h2_span_left.string = ".: "
-
-                h2_span_title = soup.new_tag("span")
-                h2_span_title['class'] = "title-h2"
-                h2_span_title.string = h2_name
-
-                h2_span_right = soup.new_tag("span")
-                h2_span_right['class'] = "right-h2"
-                h2_span_right.string = " :."
-
-                myh2.string.insert_before(h2_span_left)
-                myh2.span.insert_after(h2_span_right)
-                myh2.span.insert_after(h2_span_title)
-
-                h2_title.append(myh2)
-                h2_title.append(h2_name)
-
-                h2_titles_list.append(h2_title)
-            h2_title = []
-
-    tested_title_list = []
-    tested_title = []
-    for filename in filenames:
-        soup = bs4.BeautifulSoup(open(filename), "lxml")
-        if os.path.exists(filename):
-            with open(filename, 'r', encoding='utf-8') as indexmd:
-                lines = indexmd.readlines()
-
-                with open(filename, 'w', encoding='utf-8') as indexmd:
-                    for line in lines:
-                        string = ""
-                        for title in h1_titles_list:
-                            if re.match(".*"+title[0]+".*", line):
-                                string = str(title[1])
-                        for title in h2_titles_list:
-                            if re.match(".*"+title[0]+".*", line):
-                                string = str(title[1])
-                        if string != "":
-                            indexmd.write(re.sub(line, string, line))
-                        else:
-                            indexmd.write(line)
-
 
 def generate_index_and_feed():
     """Generate index.html and feeds (atom and rss)."""
@@ -327,7 +194,7 @@ def generate_index_and_feed():
     generators.generate_table()
     generators.generate_blog_list(feed)
     generators.generate_notes_list()
-    rewrite_title()
+    generators.rewrite_title()
 
     feed.updated_datetime = utils.current_datetime()
     feed.updated = ET.Element("updated")
-- 
cgit v1.2.1