diff options
author | neodarz <neodarz@neodarz.net> | 2018-01-21 05:50:43 +0100 |
---|---|---|
committer | neodarz <neodarz@neodarz.net> | 2018-01-21 05:50:43 +0100 |
commit | 70eea5c1874ef3fffe59c5ab0a03dae25beb2d25 (patch) | |
tree | dae09eba5bcee087d1d8e98c968c022d89c73401 /khaganatWikhan.py | |
parent | 80a16a605576f85e15ffd0a3b4baf2f1dbaa2ab4 (diff) | |
download | ryzomcore_searx-70eea5c1874ef3fffe59c5ab0a03dae25beb2d25.tar.xz ryzomcore_searx-70eea5c1874ef3fffe59c5ab0a03dae25beb2d25.zip |
Add code to directly build and start a preconfigured searx instance
Diffstat (limited to 'khaganatWikhan.py')
-rw-r--r-- | khaganatWikhan.py | 84 |
1 files changed, 0 insertions, 84 deletions
# Doku Wiki
#
# @website     https://www.dokuwiki.org/
# @provide-api yes
#              (https://www.dokuwiki.org/devel:xmlrpc)
#
# @using-api   no
# @results     HTML
# @stable      yes
# @parse       (general) url, title, content

from urllib import urlencode
from lxml.html import fromstring
from searx.engines.xpath import extract_text

# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False
language_support = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
base_url = 'https://khaganat.net'
search_url = '/wikhan/?do=search'\
    '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\


def request(query, params):
    """Build the search request.

    Mutates and returns *params*, setting params['url'] to the wiki's
    OpenSearch-compatible search endpoint with the query URL-encoded
    under the 'id' key.
    """
    params['url'] = base_url +\
        search_url.format(query=urlencode({'id': query}))

    return params


def response(resp):
    """Parse the HTML search page of the wiki into result dicts.

    Returns a list of {'title', 'content', 'url'} dicts gathered from
    two sections of the page: the "quickresult" title matches and the
    full-text "search_results" definition list.
    """
    results = []

    doc = fromstring(resp.text)

    # Quickhits: title-only matches, no snippet available.
    for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
        except IndexError:
            # entry carries no result link; skip it
            continue

        if not res_url:
            continue

        title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))

        results.append({'title': title,
                        'content': "",
                        'url': base_url + res_url})

    # Full-text results: each <dt> carries the link/title, the following
    # <dd> carries the snippet for that same hit.
    # BUG FIX: the original referenced res_url/title before assignment when
    # a <dd> appeared before any <dt>; initialize and guard the <dd> branch.
    res_url = None
    title = ''
    for r in doc.xpath('//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
                title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
            elif r.tag == "dd" and res_url:
                content = extract_text(r.xpath('.'))

                results.append({'title': title,
                                'content': content,
                                'url': base_url + res_url})
        except IndexError:
            # malformed entry (no wikilink in the <dt>); skip it
            continue

    return results