aboutsummaryrefslogtreecommitdiff
path: root/ryzomcoreConfluence.py
diff options
context:
space:
mode:
Diffstat (limited to 'ryzomcoreConfluence.py')
-rw-r--r--ryzomcoreConfluence.py130
1 file changed, 0 insertions, 130 deletions
diff --git a/ryzomcoreConfluence.py b/ryzomcoreConfluence.py
deleted file mode 100644
index 5d35c9f..0000000
--- a/ryzomcoreConfluence.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Ryzom Core Confluence (Atlassian Confluence wiki search)
-#
-# @website https://ryzomcore.atlassian.net
-# @provide-api yes
-# (https://developer.atlassian.com/server/confluence/confluence-rest-api/)
-#
-# @using-api no
-# @results HTML
-# @stable yes
-# @parse (general) url, title, content
-
-from urllib import urlencode
-from lxml.html import fromstring
-from searx.engines.xpath import extract_text
-from datetime import datetime
-
# Engine-dependent configuration used by searx.
categories = ['general']  # TODO: 'images', 'music', 'videos', 'files'
paging = False
language_support = False
number_of_results = 5

# Confluence site-search endpoint; the query string is filled in by request().
base_url = 'https://ryzomcore.atlassian.net'
search_url = '/wiki/dosearchsite.action?{query}'


def request(query, params):
    """Build the outgoing search request.

    Sets params['url'] to the full Confluence site-search URL for *query*
    and returns the (mutated) params dict to the caller.
    """
    encoded_query = urlencode({'queryString': query})
    params['url'] = base_url + search_url.format(query=encoded_query)
    return params
-
-
-# get response from search-request
-def response(resp):
- results = []
-
- doc = fromstring(resp.text)
-
- # parse results
- # Quickhits
- for r in doc.xpath('//ol[@class="search-results cql"]/li'):
- try:
- res_url = r.xpath('.//a[@class="search-result-link visitable"]/@href')[-1]
- except:
- continue
-
- if not res_url:
- continue
-
- title = extract_text(r.xpath('.//a[@class="search-result-link visitable"]'))
-
- content = extract_text(r.xpath('.//div[@class="highlights"]'))
-
- dataBrut = extract_text(r.xpath('.//span[@class="date"]'))
-
- #data = dataBrut.split('-')
-
-
-
- #date = '-'.join(dataS)
- #adatetime = data[1]
- #data = adatetime.split(',')
- #date = data[1]
- Thedate = dataBrut.split(' ')
- TheDay = Thedate[1].split(',')
-
-
- if Thedate[0] == "Jan":
- ThedateMonth = 1
- elif Thedate[0] == "Feb":
- ThedateMonth = 2
- elif Thedate[0] == "Mar":
- ThedateMonth = 3
- elif Thedate[0] == "Apr":
- ThedateMonth = 4
- elif Thedate[0] == "May":
- ThedateMonth = 5
- elif Thedate[0] == "Jun":
- ThedateMonth = 6
- elif Thedate[0] == "Jul":
- ThedateMonth = 7
- elif Thedate[0] == "Aug":
- ThedateMonth = 8
- elif Thedate[0] == "Sep":
- ThedateMonth = 9
- elif Thedate[0] == "Oct":
- ThedateMonth = 10
- elif Thedate[0] == "Nov":
- ThedateMonth = 11
- else:
- ThedateMonth = 12
-
- # append result
- results.append({'title': title,
- 'content': content,
- 'url': base_url + res_url,
- 'publishedDate': datetime(int(Thedate[2]), ThedateMonth, int(TheDay[0]), 3, 1, 42)})
-
-
-
- # Search results
- #for r in doc.xpath('//dl[@class="search_results"]/*'):
- # try:
- # if r.tag == "dt":
- # res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
- # title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
- # elif r.tag == "dd":
- # content = extract_text(r.xpath('.'))
-
- # append result
- # results.append({'title': title,
- # 'content': content,
- # 'url': base_url + res_url})
- # except:
- # continue
-
- # if not res_url:
- # continue
-
- # return results
- return results