1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
|
# Doku Wiki
#
# @website https://www.dokuwiki.org/
# @provide-api yes
# (https://www.dokuwiki.org/devel:xmlrpc)
#
# @using-api no
# @results HTML
# @stable yes
# @parse (general) url, title, content
from urllib import urlencode
from lxml.html import fromstring
from searx.engines.xpath import extract_text
from datetime import datetime
# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False            # this engine does not request additional result pages
language_support = False  # queries are sent as-is, with no language parameter
number_of_results = 5     # NOTE(review): not referenced elsewhere in this file — confirm it is read by the framework
# search-url
# Doku is OpenSearch compatible
# NOTE(review): despite the "Doku Wiki" header, this URL targets a MediaWiki
# api.php endpoint ('action=query', 'mw-search-results' markup below) — confirm
base_url = 'https://forge.ryzom.com'
search_url = '/wiki/W/api.php?action=query'\
    '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\
# build the outgoing search request
def request(query, params):
    """Set params['url'] to the full search URL for *query* and return params.

    The query string is URL-encoded as the 'search' parameter and spliced
    into the engine's search_url template.
    """
    encoded_query = urlencode({'search': query})
    params['url'] = base_url + search_url.format(query=encoded_query)
    return params
# get response from search-request
def response(resp):
    """Parse the MediaWiki search-results HTML in *resp* into result dicts.

    Each result carries 'title', 'content' and 'url'; 'publishedDate' is
    added when the per-result date line can be parsed. Returns a list of
    dicts (possibly empty).
    """
    results = []
    doc = fromstring(resp.text)

    # month-name -> month-number lookup; unknown names fall back to 12,
    # matching the original if/elif ladder's final else branch
    month_numbers = {'January': 1, 'February': 2, 'March': 3,
                     'April': 4, 'May': 5, 'June': 6,
                     'July': 7, 'August': 8, 'September': 9,
                     'October': 10, 'November': 11}

    # parse results (MediaWiki "Quickhits" list)
    for r in doc.xpath('//ul[@class="mw-search-results"]/li'):
        try:
            # [-1]: take the last href in the heading if several are present
            res_url = r.xpath('.//div[@class="mw-search-result-heading"]/a/@href')[-1]
        except IndexError:
            # heading without a link — skip this entry
            continue
        if not res_url:
            continue

        title = extract_text(r.xpath('.//div[@class="mw-search-result-heading"]/a/@title'))
        content = extract_text(r.xpath('.//div[@class="searchresult"]'))
        data_brut = extract_text(r.xpath('.//div[@class="mw-search-result-data"]'))

        result = {'title': title,
                  'content': content,
                  'url': base_url + res_url}

        # expected date text shape: "<size info> - hh:mm, dd Month yyyy"
        # splitting on ' ' after the comma yields ['', dd, Month, yyyy]
        try:
            adatetime = data_brut.split('-')[1]
            date_part = adatetime.split(',')[1]
            the_date = date_part.split(' ')
            month = month_numbers.get(the_date[2], 12)
            # fixed time-of-day 03:01:42 preserved from the original code
            result['publishedDate'] = datetime(int(the_date[3]), month,
                                               int(the_date[1]), 3, 1, 42)
        except (IndexError, ValueError):
            # unexpected date format: keep the result, just without a date,
            # instead of letting the whole response fail
            pass

        # append result
        results.append(result)

    return results
|