From 951971a6bf173ca8cc601abdd9ca8d33da93e657 Mon Sep 17 00:00:00 2001
From: neodarz <neodarz@neodarz.net>
Date: Fri, 5 May 2017 23:29:08 +0200
Subject: Adapt design

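Wrap the generated menu entries in styled links and decorate every
non-article <h1> heading with left/title/right spans, rewriting the
generated HTML files in place.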
---
 pyblog | 81 ++++++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 57 insertions(+), 24 deletions(-)


diff --git a/pyblog b/pyblog
index 4c14e9bf..7e1a4970 100755
--- a/pyblog
+++ b/pyblog
@@ -37,6 +37,9 @@ import dateutil.parser
 import dateutil.tz
 import lxml.etree as ET
 
+from bs4 import UnicodeDammit
+from pprint import pprint
+
 ############################# BLOG CONFIGURATIONS ##############################
 # Safe to customize
 BLOG_HOME = "http://neodarz.net/"
@@ -306,7 +309,7 @@ def generate_menu():
     # Generate a string listing the files in the website folder
     htmly_website_page = "<ul>"
     for name in os.listdir(os.path.join(BUILDDIR, "website")):
-        htmly_website_page += "<li><a href='/website/"+name+"'>"+name.split('.html')[0]+"</a></li>"
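+        # Each menu entry is now a styled link wrapping the <li>, with
+        # left/center/right spans as styling hooks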
+        htmly_website_page += "<a href='/website/"+name+"' class='lia'><li><span class='left-lia'></span><span class='center-lia'>"+name.split('.html')[0]+"</span><span class='right-lia'></span></li></a>"
     htmly_website_page += "</ul>"
 
     # Write the menu in file, in place of the <generated_menu>
@@ -627,33 +630,63 @@ def rewrite_title():
                     except IndexError:
                         filenames.append(name)
 
+    h1_titles_list = []
+    h1_title = []
     fd, tmppath = tempfile.mkstemp()
     os.close(fd)
     for filename in filenames:
         soup = bs4.BeautifulSoup(open(filename), "lxml")
-        for h1 in soup.find_all("h1"):
-            if re.match("^(?!.*article-title).*$", str(h1)):
-                print(h1)
-                h1_id = h1['id']
-                h1_name = h1.string
-                h1_ = soup.new_tag("h1")
-                h1_['id'] = h1_id
-                h1_.string = h1_name
-                h1.name = "span"
-                h1.string = ""
-                h1['class'] = "left-h1"
-                del h1['id']
-                h1.string.insert_before(h1_)
-                print(h1)
-
-        #with open(tmppath, 'w', encoding='utf-8') as tmpfile:
-        #    if os.path.exists(filename):
-        #    #    with open(filename, 'r', encoding='utf-8') as indexmd:
-                #    lines = indexmd.readlines()
-                #    with open(filename, 'w', encoding='utf-8') as indexmd:
-                        #for line in lines:
-                            #indexmd.write(re.sub(r'\<pre class="header"\>\<code\>(.+?)\<\/code\>\<\/pre\>', '<pre class="header"><code>'+f.renderText('Miou')+'(^._.^)ノ</code></pre>', line))
-                            #indexmd.write(re.suv(r'\<code class="sourceCode diff"\>', '\<code class="sourceCode diff" id="AsourceCode"\>', line))
+        for myh1 in soup.find_all("h1"):
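+            # Only decorate headings that are not article titles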
+            if re.match("^(?!.*article-title).*$", str(myh1)):
+                h1_id = myh1['id']
+                h1_name = myh1.string
+
+                h1_title.append(str(myh1))
+
+
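+                # Rebuild the heading in place: clear its text, then wrap the
+                # title in decorative left/title/right spans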
+                myh1['class'] = "h1"
+                myh1.string = ""
+
+                h1_span_left = soup.new_tag("span")
+                h1_span_left['class'] = "left-h1"
+                h1_span_left.string = "█▓▒░"
+
+                h1_span_title = soup.new_tag("span")
+                h1_span_title['class'] = "title-h1"
+                h1_span_title.string = "「"+h1_name+"」"
+
+                h1_span_right = soup.new_tag("span")
+                h1_span_right['class'] = "right-h1"
+                h1_span_right.string = "░▒▓█"
+
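+                # Insert the spans so the final order inside the heading is
+                # left-h1, title-h1, right-h1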
+                myh1.string.insert_before(h1_span_left)
+                myh1.span.insert_after(h1_span_right)
+                myh1.span.insert_after(h1_span_title)
+
+                h1_title.append(myh1)
+                h1_title.append(h1_name)
+
+                h1_titles_list.append(h1_title)
+            h1_title = []
+
+    # Second pass: rewrite each file on disk, replacing the original heading
+    # markup with the decorated version built above
+    for filename in filenames:
+        if os.path.exists(filename):
+            with open(filename, 'r', encoding='utf-8') as indexmd:
+                lines = indexmd.readlines()
+
+                with open(filename, 'w', encoding='utf-8') as indexmd:
+                    for line in lines:
+                        old_markup = ""
+                        new_markup = ""
+                        for title in h1_titles_list:
+                            # title[0] is the original <h1> markup; a plain
+                            # substring test avoids treating it as a regex
+                            if title[0] in line:
+                                old_markup = title[0]
+                                new_markup = str(title[1])
+                        if new_markup != "":
+                            # Swap only the heading markup so the rest of the
+                            # line, including its newline, is preserved
+                            indexmd.write(line.replace(old_markup, new_markup))
+                        else:
+                            indexmd.write(line)
 
 
 def absolutify_links(soup, baseurl):
-- 
cgit v1.2.1