aboutsummaryrefslogtreecommitdiff
path: root/devryzom.py
diff options
context:
space:
mode:
authorneodarz <neodarz@neodarz.net>2018-01-21 05:50:43 +0100
committerneodarz <neodarz@neodarz.net>2018-01-21 05:50:43 +0100
commit70eea5c1874ef3fffe59c5ab0a03dae25beb2d25 (patch)
treedae09eba5bcee087d1d8e98c968c022d89c73401 /devryzom.py
parent80a16a605576f85e15ffd0a3b4baf2f1dbaa2ab4 (diff)
downloadryzomcore_searx-70eea5c1874ef3fffe59c5ab0a03dae25beb2d25.tar.xz
ryzomcore_searx-70eea5c1874ef3fffe59c5ab0a03dae25beb2d25.zip
Add code to directly build and start a preconfigured searx
Diffstat (limited to 'devryzom.py')
-rw-r--r--devryzom.py106
1 file changed, 0 insertions, 106 deletions
diff --git a/devryzom.py b/devryzom.py
deleted file mode 100644
index 7201096..0000000
--- a/devryzom.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Ryzom Core development wiki (engine adapted from the searx Doku Wiki template)
-#
-# @website     http://dev.ryzom.com
-# @provide-api yes
-#              (https://www.dokuwiki.org/devel:xmlrpc)
-#
-# @using-api   no
-# @results     HTML
-# @stable      yes
-# @parse       (general) url, title, content
-
-from urllib import urlencode
-from lxml.html import fromstring
-from searx.engines.xpath import extract_text
-from datetime import datetime
-
# engine dependent config
categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
paging = False  # a single result page is requested, no offset support
language_support = False  # no language parameter is sent to the wiki
number_of_results = 5  # NOTE(review): not referenced in this file - presumably read by searx core, verify

# search-url
# the wiki search of dev.ryzom.com restricted to wiki pages;
# {query} is replaced with the url-encoded query string in request()
base_url = 'http://dev.ryzom.com'
search_url = '/search?wiki_pages=1&{query}'
# earlier DokuWiki-style variant, kept for reference:
#search_url = '/wikhan/?do=search'\
#             '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\
-
-
# do search-request
def request(query, params):
    """Fill in the search URL for *query* and return the params dict.

    query  -- plain-text search terms
    params -- searx request dict; only params['url'] is set here
    """
    encoded = urlencode({'q': query})
    params['url'] = base_url + search_url.format(query=encoded)
    return params
-
-
# get response from search-request
def response(resp):
    """Extract search results from a dev.ryzom.com results page.

    The page lists each hit inside dl#search-results as a <dt> (link and
    title) followed by a <dd> carrying a description span and an author
    span whose text starts with a MM/DD/YYYY date.

    :param resp: response object whose ``text`` attribute holds the HTML
    :returns: list of dicts with 'title', 'content', 'url' and - when the
              date parses - 'publishedDate'
    """
    results = []

    doc = fromstring(resp.text)

    entries = doc.xpath('//dl[@id="search-results"]/dt')
    descriptions = doc.xpath('//dl[@id="search-results"]/dd')

    # Index of the <dd> paired with the next accepted <dt>.  A <dt> without
    # a usable link does not consume a description - same pairing rule as
    # the original nested counter scan, but O(n) instead of O(n^2).
    dd_index = 0

    for entry in entries:
        try:
            res_url = entry.xpath('.//a/@href')[-1]
        except IndexError:
            # entry carries no link at all; was a bare `except:` which also
            # swallowed KeyboardInterrupt/SystemExit - only the empty xpath
            # result is expected here
            continue

        if not res_url:
            continue

        title = extract_text(entry.xpath('.//a'))

        if dd_index >= len(descriptions):
            # no description left for this hit; the original code silently
            # dropped such trailing entries as well
            break

        description = descriptions[dd_index]
        dd_index += 1

        content = extract_text(description.xpath('.//span[@class="description"]'))

        result = {'title': title,
                  'content': content,
                  'url': base_url + res_url}

        # author line looks like "MM/DD/YYYY ..." - a malformed or missing
        # date no longer raises out of the engine; the result is simply
        # appended without a publication date
        author = extract_text(description.xpath('.//span[@class="author"]'))
        date_parts = author.split(' ')[0].split('/')
        try:
            result['publishedDate'] = datetime(int(date_parts[2]),
                                               int(date_parts[0]),
                                               int(date_parts[1]),
                                               0, 0, 0)
        except (IndexError, ValueError):
            pass

        results.append(result)

    return results