author     neodarz <neodarz@neodarz.net>   2019-01-20 18:31:24 +0100
committer  neodarz <neodarz@neodarz.net>   2019-01-20 18:31:24 +0100
commit     2ea03ea09a1b7ea64d78bc6589b6aaf5de884e26 (patch)
tree       e46a8685728b18bd3b730dc964ab8c30e0f5a05e
parent     e52994a2efb4d8d97961ff5427b3c961e2e39320 (diff)
download   ryzomcore_searx-2ea03ea09a1b7ea64d78bc6589b6aaf5de884e26.tar.xz
           ryzomcore_searx-2ea03ea09a1b7ea64d78bc6589b6aaf5de884e26.zip
Clean dev ryzom engine code
-rw-r--r--  src/devryzom.py  65
1 file changed, 18 insertions, 47 deletions
diff --git a/src/devryzom.py b/src/devryzom.py
index 80afab5..8d31524 100644
--- a/src/devryzom.py
+++ b/src/devryzom.py
@@ -24,8 +24,6 @@ number_of_results = 5
 # Doku is OpenSearch compatible
 base_url = 'https://dev.ryzom.com'
 search_url = '/search?wiki_pages=1&{query}'
-#search_url = '/wikhan/?do=search'\
-#              '&{query}'
 # TODO '&startRecord={offset}'\
 # TODO '&maximumRecords={limit}'\
@@ -45,62 +43,35 @@ def response(resp):
     doc = fromstring(resp.text)
 
-    # parse results
-    # Quickhits
+    # Search
     i = 0
     for r in doc.xpath('//dl[@id="search-results"]/dt'):
         try:
             res_url = r.xpath('.//a/@href')[-1]
-        except:
-            continue
-
-        if not res_url:
-            continue
-
-        title = extract_text(r.xpath('.//a'))
+            title = extract_text(r.xpath('.//a'))
 
-        i = i + 1
+            i = i + 1
 
-        y = 0
+            y = 0
+            for s in doc.xpath('//dl[@id="search-results"]/dd'):
+                y = y + 1
+                if y == i:
+                    content = extract_text(s.xpath('.//span[@class="description"]'))
+                    dataBrut = extract_text(s.xpath('.//span[@class="author"]'))
 
-        for s in doc.xpath('//dl[@id="search-results"]/dd'):
-            y = y + 1
-            if y == i:
-                content = extract_text(s.xpath('.//span[@class="description"]'))
-
-                dataBrut = extract_text(s.xpath('.//span[@class="author"]'))
-                data = dataBrut.split(' ')
-                date = data[0].split('/')
+                    data = dataBrut.split(' ')
+                    date = data[0].split('/')
 
+                    # append result
+                    results.append({'title': title,
+                                    'content': content,
+                                    'url': base_url + res_url,
+                                    'publishedDate': datetime(int(date[2]), int(date[0]), int(date[1]), 0, 0, 0)})
 
-                # append result
-                results.append({'title': title,
-                                'content': content,
-                                'url': base_url + res_url,
-                                'publishedDate': datetime(int(date[2]), int(date[0]), int(date[1]), 0, 0, 0)})
-    # append result
-    #results.append({'content': content})
-
-    # Search results
-    #for r in doc.xpath('//dl[@class="search_results"]/*'):
-    #    try:
-    #        if r.tag == "dt":
-    #            res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
-    #            title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
-    #        elif r.tag == "dd":
-    #            content = extract_text(r.xpath('.'))
-
-    #            # append result
-    #            results.append({'title': title,
-    #                            'content': content,
-    #                            'url': base_url + res_url})
-    #    except:
-    #        continue
-
-    #    if not res_url:
-    #        continue
+        except:
+            continue
 
     # return results
     return results
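
For reference, a minimal standalone sketch of the parsing strategy the cleaned-up response() follows: pair each dt entry of dl#search-results with the dd at the same position (the role of the i/y counters above), read the description and author spans from the dd, and build publishedDate from the leading date, which the committed datetime(int(date[2]), int(date[0]), int(date[1]), 0, 0, 0) call implies is MM/DD/YYYY. The sample markup, the /some/page path and the zip()-based pairing are illustrative assumptions, not the engine's actual code or the site's real HTML; lxml's text_content() stands in for searx's extract_text() helper.

# Illustrative sketch only, not the committed engine code.
from datetime import datetime
from lxml.html import fromstring

base_url = 'https://dev.ryzom.com'

# Hypothetical sample of the dt/dd structure the engine parses.
sample = '''<html><body><dl id="search-results">
  <dt><a href="/some/page">Example wiki page</a></dt>
  <dd><span class="description">Example description</span>
      <span class="author">01/20/2019 18:31 Some Author</span></dd>
</dl></body></html>'''

doc = fromstring(sample)
results = []

dts = doc.xpath('//dl[@id="search-results"]/dt')
dds = doc.xpath('//dl[@id="search-results"]/dd')

# zip() plays the role of the i/y counters in the committed code: both walk
# the dt and dd lists in lockstep, taking the dd at the same index as the dt.
for dt, dd in zip(dts, dds):
    try:
        res_url = dt.xpath('.//a/@href')[-1]
        title = dt.text_content().strip()
        content = dd.xpath('.//span[@class="description"]')[0].text_content()
        author = dd.xpath('.//span[@class="author"]')[0].text_content()
        # First token of the author span is assumed to be an MM/DD/YYYY date.
        month, day, year = author.split(' ')[0].split('/')
        results.append({'title': title,
                        'content': content,
                        'url': base_url + res_url,
                        'publishedDate': datetime(int(year), int(month), int(day))})
    except (IndexError, ValueError):
        continue

print(results)

Running the sketch prints a single result dict whose publishedDate is 2019-01-20; swapping zip() back for the explicit counters reproduces the pairing behaviour of the committed loop.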