#!/usr/bin/env python3
import os
import sys
import tempfile
import re
import bs4
import dateutil.parser
import io
import subprocess
import lxml.etree as ET
import urllib.parse
import copy
import email.utils
import time
from rss import *
from config.config import *
from utils import utils
def generate_menu():
"""Generate menu."""
sys.stderr.write("generating menu\n")
fd, tmppath = tempfile.mkstemp()
os.close(fd)
    # Collect the pages where the menu will be written
html_fileList = []
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if name.endswith(".html"):
try:
html_fileList.append(os.path.join(root.split('build/')[1], name))
except IndexError:
html_fileList.append(name)
    # Build the string that contains the menu links
    # (the menu markup below is assumed: a plain <ul> of links)
    htmly_website_page = "<ul>\n"
    for name in sorted(os.listdir(os.path.join(BUILDDIR, "website"))):
        if name != "Documents":
            htmly_website_page += ('<li><a href="/website/' + name + '">'
                                   + name.replace('.html', '') + '</a></li>\n')
    htmly_website_page += "</ul>\n"
    # Write the menu into every page collected above, in place of the
    # "-- generate menu here --" placeholder
for html_file in html_fileList:
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/"+html_file):
with open("build/"+html_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/"+html_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'-- generate menu here --', htmly_website_page, line))
os.remove(tmppath)
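
# A minimal, self-contained sketch of the placeholder substitution that
# generate_menu() performs on each built page. The page snippet, menu markup
# and helper name below are illustrative only; "-- generate menu here --" is
# the marker the function actually looks for.
def _menu_substitution_example():
    example_menu = '<ul><li><a href="/website/about.html">about</a></li></ul>'
    example_page = '<nav>\n-- generate menu here --\n</nav>\n'
    # apply the same line-by-line re.sub used in generate_menu()
    return "".join(re.sub(r'-- generate menu here --', example_menu, line)
                   for line in example_page.splitlines(keepends=True))
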
def generate_table():
"""Generate table."""
first_comp = 1
first_pr = 1
tr_class = "odd"
documents_fileList = []
documents_fileList.append("/website/bts-sio.html")
fd, tmppath = tempfile.mkstemp()
os.close(fd)
htmly_website_page = ""
if os.path.exists(BUILDDIR+"/website/bts-sio.html"):
sys.stderr.write("generating table\n")
        # Collect the pages where the table will be written
        #for root, dirs, files in os.walk(BUILDDIR+"/website/Documents/Situation2"):
        #    for name in files:
        #        if name.endswith(".html"):
        #            try:
        #                documents_fileList.append(os.path.join(root.split('build')[1], name))
        #            except IndexError:
        #                documents_fileList.append(name)
        # Build the string that contains the submenu links
        #htmly_website_page = "<ul>"
        #for name in os.listdir(os.path.join(BUILDDIR, "website/Documents/Situation2")):
        #    htmly_website_page += "<li><a href='/website/Documents/Situation2/" + name + "'>" + name + "</a></li>"
        # Replace the table placeholders in every page collected above, starting
        # with the "-- table --" marker
for document_file in documents_fileList:
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build"+document_file):
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
                            # replace the "-- table --" marker with the opening
                            # table markup and its three header cells (markup assumed)
                            indexmd.write(re.sub(
                                r'-- table --',
                                '<table>\n<thead>\n<tr class="header">\n'
                                '<th>Compétence</th>\n'
                                '<th>Activité</th>\n'
                                '<th>Justification</th>\n'
                                '</tr>\n</thead>\n<tbody>',
                                line))
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
                            if re.match(r'^\$.*', line) and first_pr == 1:
                                # a line starting with "$" opens a new table row
                                # and its first cell (markup assumed)
                                line_edited = ('<tr class="' + tr_class + '">\n<td>'
                                               + line.split("$")[1] + '</td>\n')
                                indexmd.write(re.sub(r'^\$.*', line_edited, line))
                            else:
                                indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
                            if re.match(r'^ \$.*\$$', line):
                                # a line of the form " $...$" holds the middle
                                # cell of the row (markup assumed)
                                indexmd.write(re.sub(r'^ \$.*\$$',
                                                     '<td>' + line.split("$")[1] + '</td>',
                                                     line))
else:
indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
                            if re.match(r'^ \$.*', line):
                                # a remaining " $..." line holds the last cell
                                # of the row and closes it (markup assumed)
                                indexmd.write(re.sub(r'^ \$.*',
                                                     '<td>' + line.split("$")[1] + '</td>\n</tr>',
                                                     line))
                            else:
                                # the pattern cannot match here, so the line is
                                # written through unchanged
                                indexmd.write(line)
with open("build"+document_file, 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build"+document_file, 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r"-- end table --", "
", line))
os.remove(tmppath)
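
# For reference, generate_table() rewrites the block between the literal
# "-- table --" and "-- end table --" markers in build/website/bts-sio.html.
# The source format is assumed to look like the sketch below: each "$..." line
# opens a row, " $...$" holds the middle cell and " $..." the last cell,
# matching the headers Compétence / Activité / Justification.
#
#   -- table --
#   $Première compétence
#    $Activité correspondante$
#    $Justification associée
#   -- end table --
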
def generate_blog_list(feed):
""""Generate blog list """
sys.stderr.write("generating blog list\n")
html_fileList = []
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if re.search(r'blog',root):
if name.endswith(".html"):
try:
html_fileList.append(os.path.join(root.split('blog/')[1], name))
except IndexError:
html_fileList.append(name)
# generate TOC
for html_file in html_fileList:
        # (the tag and class names below are assumed; the original markup was lost)
        div_blog_list = u'<div class="blog-index">\n\n'
        year = 10000  # will be larger than the latest year for quite a while
        # recall that entries are in reverse chronological order
        table_opened = False
        for entry in feed.entries:
            date = entry.updated_datetime
            if date.year < year:
                # close the previous table if there is one
                if table_opened:
                    div_blog_list += u'</table>\n'
                # write a new heading with the smaller year
                year = date.year
                div_blog_list += u'\n<h2 class="toc-year">.:{0}:.</h2>\n\n'.format(year)
                div_blog_list += u'<table class="toc">\n'
                table_opened = True
            # write a new table row entry, in the format:
            #
            #   <tr><td class="tentry-date">MM-DD</td>
            #       <td class="tentry-title"><a href="...">Title</a></td></tr>
            #
            # entry.id_text and entry.title_text are the AtomEntry attribute
            # names referenced elsewhere in this file (assumed here)
            monthday = date.strftime("%m-%d")
            div_blog_list += (u'<tr><td class="tentry-date">%s</td>'
                              u'<td class="tentry-title"><a href="%s">%s</a></td></tr>\n' %
                              (monthday, entry.id_text, entry.title_text))
        # close the last open table and the wrapping div
        if table_opened:
            div_blog_list += u'</table>\n'
        div_blog_list += u'</div>'
fd, tmppath = tempfile.mkstemp()
os.close(fd)
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/blog/index.html"):
with open("build/blog/index.html", 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/blog/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate blog_list here %}', div_blog_list, line))
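
# generate_blog_list() only rewrites build/blog/index.html, replacing the
# literal "{% generate blog_list here %}" placeholder with the listing built
# from feed.entries. A minimal sketch of the expected placeholder, with
# illustrative surrounding markup:
#
#   <!-- in build/blog/index.html -->
#   <div id="archives">
#   {% generate blog_list here %}
#   </div>
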
def generate_notes_list():
""""Generate notes list """
sys.stderr.write("generating notes list\n")
html_fileList = []
for root, dirs, files in os.walk(BUILDDIR):
for name in files:
if re.search(r'notes',root):
if name.endswith(".html"):
try:
html_fileList.append(os.path.join(root.split('notes/')[1], name))
except IndexError:
html_fileList.append(name)
    div_notes_list = u'<div class="notes-index">\n\n'  # (class name assumed)
year = 10000 # will be larger than the latest year for quite a while
# recall that entries are in reverse chronological order
table_opened = False
for name in list(reversed(sorted(os.listdir(os.path.join(BUILDDIR, "notes"))))):
if re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}.*\.html", name):
htmlpath = os.path.join(BUILDDIR, "notes", name)
#tentry = AtomEntry()
#item = RssItem()
try:
with open(htmlpath, encoding="utf-8") as htmlfile:
soup = bs4.BeautifulSoup(htmlfile.read(), "lxml")
# generate atom entry
#entry.author = copy.deepcopy(feed.author) # assume it's always the same author
#entry_url = urllib.parse.urljoin(BLOG_HOME, "blog/%s" % name)
#entry.id_text = entry_url
#entry.id = ET.Element("id")
#entry.id.text = entry_url
relpath = "/notes/%s" % name
#entry.link = ET.Element("link", href=entry_url)
title_text = soup.title.text
#entry.title = ET.Element("title", type="html")
#entry.title.text = entry.title_text
post_date = soup.find("meta", attrs={"name": "date"})["content"]
updated_datetime = dateutil.parser.parse(post_date)
date = updated_datetime
if date.year < year:
# close the previous table if there is one
if table_opened:
                            div_notes_list += u'</table>\n'
                        # write a new heading with the smaller year
                        # (the tag and class names below are assumed)
                        year = date.year
                        div_notes_list += u'\n<h2 class="toc-year">.:{0}:.</h2>\n\n'.format(year)
                        div_notes_list += u'<table class="toc">\n'
                        table_opened = True
                    # write a new table row entry, in the format:
                    #
                    #   <tr><td class="tentry-date">MM-DD</td>
                    #       <td class="tentry-title"><a href="/notes/...">Title</a></td></tr>
                    monthday = date.strftime("%m-%d")
                    div_notes_list += (u'<tr><td class="tentry-date">'
                                       u'<time datetime="%s">%s</time></td>'
                                       u'<td class="tentry-title"><a href="%s">%s</a></td></tr>\n' %
                                       (date.isoformat(), monthday, relpath, title_text))
except Exception:
sys.stderr.write("error: failed to generate feed entry from %s\n" % name)
with open(htmlpath, encoding="utf-8") as htmlfile:
sys.stderr.write("dumping HTML:%s\n\n" % htmlfile.read())
raise
if table_opened:
        div_notes_list += u'</table>\n'
    div_notes_list += u'</div>'
fd, tmppath = tempfile.mkstemp()
os.close(fd)
with open(tmppath, 'w', encoding='utf-8') as tmpfile:
if os.path.exists("build/notes/index.html"):
with open("build/notes/index.html", 'r', encoding='utf-8') as indexmd:
lines = indexmd.readlines()
with open("build/notes/index.html", 'w', encoding='utf-8') as indexmd:
for line in lines:
indexmd.write(re.sub(r'{% generate notes_list here %}', div_notes_list, line))
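
# generate_notes_list() expects note files named YYYY-MM-DD*.html under
# build/notes/, each providing a <title> and a parsable date in a meta tag,
# e.g. (illustrative values):
#
#   <meta name="date" content="2018-01-15T10:00:00+01:00">
#   <title>Note title</title>
#
# It then replaces the "{% generate notes_list here %}" placeholder in
# build/notes/index.html with the generated listing.
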
def generate_index(feed):
"""Generate index.html from index.md and a TOC."""
sys.stderr.write("generating index.html\n")
# generate TOC
tocbuff = io.StringIO()
    tocbuff.write('<div class="blog-index">')  # (class name assumed)
year = 10000 # will be larger than the latest year for quite a while
# recall that entries are in reverse chronological order
table_opened = False
for entry in feed.entries:
date = entry.updated_datetime
if date.year < year:
# close the previous table if there is one
if table_opened:
                tocbuff.write(u'</table>\n')
            # write a new heading with the smaller year
            # (the tag and class names below are assumed)
            year = date.year
            tocbuff.write(u'\n<h2 class="toc-year">.:{0}:.</h2>\n\n'.format(year))
            tocbuff.write(u'<table class="toc">\n')
table_opened = True
        # write a new table row entry, in the same format as in
        # generate_blog_list() above