author    neodarz <neodarz@neodarz.net>    2016-12-20 19:10:39 +0100
committer neodarz <neodarz@neodarz.net>    2016-12-20 19:10:39 +0100
commit    7332ce6d5f72305f1589383c6694e030821d4d86 (patch)
tree      2137a09d90617f3cd24c1d491426bf5287918a90
download  ryzomcore_searx-7332ce6d5f72305f1589383c6694e030821d4d86.tar.xz
          ryzomcore_searx-7332ce6d5f72305f1589383c6694e030821d4d86.zip
Initial release :)
-rw-r--r--  README.md                57
-rw-r--r--  RyzomForum.py            84
-rw-r--r--  devryzom.py             106
-rw-r--r--  forgeryzom.py           126
-rw-r--r--  khaganatForum.py        131
-rw-r--r--  khaganatWikhan.py        84
-rw-r--r--  ryzomcoreConfluence.py  130
-rw-r--r--  settings.yml            666
8 files changed, 1384 insertions, 0 deletions
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..dc94957
--- /dev/null
+++ b/README.md
@@ -0,0 +1,57 @@
+# About
+
+This repo contains the "search engines" for every site that hosts documentation on the Ryzom Core project.
+
+"Search engines" is in quotes because, strictly speaking, these are not search engines but crawlers that scrape the results returned by each site's own internal search engine.
+
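+Concretely, each "engine" is a small Python module that searx drives through a `request`/`response` pair. A minimal sketch of that contract (the site URL and XPath expressions below are illustrative placeholders, not one of the real engines in this repo):
+
+```
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+
+base_url = 'https://example.org'   # placeholder: the site to crawl
+search_url = '/search?{query}'
+
+
+def request(query, params):
+    # build the URL of the site's internal search engine
+    params['url'] = base_url + search_url.format(query=urlencode({'q': query}))
+    return params
+
+
+def response(resp):
+    # scrape the HTML of the results page into searx result dicts
+    results = []
+    doc = fromstring(resp.text)
+    for r in doc.xpath('//div[@class="result"]'):  # placeholder XPath
+        results.append({'title': extract_text(r.xpath('.//a')),
+                        'content': extract_text(r.xpath('.//p')),
+                        'url': base_url + r.xpath('.//a/@href')[0]})
+    return results
+```
+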
+# Site list
+
+| Website | Status | Comment |
+|---------|--------|---------|
+| [Ryzom Forge](http://forge.ryzom.com/)| OK | Date retrieved |
+| [Ryzom Core Home](https://ryzomcore.atlassian.net/wiki/display/RC/Ryzom+Core+Home)| OK | Date retrieved |
+| [Ryzom Core](http://www.ryzomcore.org/)| X | Error 500 |
+| [Wikhan](https://khaganat.net/wikhan) | OK | No date, but the date could be retrieved |
+| [Blog](https://khaganat.net/blog/fr:start) | X | |
+| [Ryzom dev](http://dev.ryzom.com/projects/ryzom/wiki) | OK | Date retrieved |
+| [Khaganat forum](https://khaganat.net/forum/) | OK | No date and no content |
+| [Ryzom forum](http://app.ryzom.com/app_forum/index.php) | X | |
+| [Google Community](https://plus.google.com/u/0/communities/103798956862568269036) | X | |
+| [nel-all archive](http://lists.nongnu.org/archive/html/nel-all/) | X | |
+| [nevrax.org](https://web.archive.org/web/20041010093002/http://nevrax.org/docs/) | X | |
+| [OpenNel Website](https://web.archive.org/web/20090615194233/http://www.opennel.org/confluence/display/NEL/Home) | X | |
+
+# Shortcuts for each site
+
+ * forge.ryzom.com : !fr
+ * ryzomcore.atlassian.net: !rcc
+ * khaganat.net/wikhan: !wik
+ * dev.ryzom.com: !dr
+ * khaganat.net/forum: !kf
+ * ryzom.com/forum: !rc
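+
+For example, to query only dev.ryzom.com, prefix the search with its shortcut in the searx search box:
+
+```
+!dr build pipeline
+```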
+
+# Development
+
+For the latest news on development, I keep a pad: https://pad.neodarz.ovh/p/searx
+
+You can also try the [demo](https://test.neodarz.ovh); beware, it is a pre-alpha version, so if it works, you are lucky ;)
+
+For more information about searx, see the [doc](https://asciimoo.github.io/searx/).
+
+To add an engine to the meta-search engine, edit the `settings.yml` file. Example engine entry:
+
+```
+- name : myPrettyEngineName
+  engine : myPrettyEngineFile
+  shortcut : myPrettyShortcut
+```
+
+The engines live in the `engines` directory; the file for each site (a complete `settings.yml` entry is shown after the list):
+
+ * forge.ryzom.com : forgeryzom.py
+ * ryzomcore.atlassian.net: ryzomcoreConfluence.py
+ * khaganat.net/wikhan: khaganatWikhan.py
+ * dev.ryzom.com: devryzom.py
+ * khaganat.net/forum: khaganatForum.py
+ * ryzom.com/forum: RyzomForum.py
+
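+For example, the dev.ryzom.com entry from the `settings.yml` shipped alongside these engines:
+
+```
+- name: dev.ryzom.com
+  engine: devryzom
+  shortcut: dr
+```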
diff --git a/RyzomForum.py b/RyzomForum.py
new file mode 100644
index 0000000..083696c
--- /dev/null
+++ b/RyzomForum.py
@@ -0,0 +1,84 @@
+# Doku Wiki
+#
+# @website https://www.dokuwiki.org/
+# @provide-api yes
+# (https://www.dokuwiki.org/devel:xmlrpc)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content
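+#
+# NOTE: this file is currently identical to khaganatWikhan.py (a DokuWiki
+# scraper pointed at khaganat.net/wikhan); a dedicated ryzom.com forum
+# engine remains to be written (the README lists Ryzom forum with status X).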
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+# Doku is OpenSearch compatible
+base_url = 'https://khaganat.net'
+search_url = '/wikhan/?do=search'\
+ '&{query}'
+# TODO '&startRecord={offset}'\
+# TODO '&maximumRecords={limit}'\
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'id': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+ # parse results
+ # Quickhits
+ for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+ try:
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+
+ # append result
+ results.append({'title': title,
+ 'content': "",
+ 'url': base_url + res_url})
+
+ # Search results
+ for r in doc.xpath('//dl[@class="search_results"]/*'):
+ try:
+ if r.tag == "dt":
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+ elif r.tag == "dd":
+ content = extract_text(r.xpath('.'))
+
+ # append result
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url})
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ # return results
+ return results
diff --git a/devryzom.py b/devryzom.py
new file mode 100644
index 0000000..7201096
--- /dev/null
+++ b/devryzom.py
@@ -0,0 +1,106 @@
+# Ryzom dev wiki (Redmine)
+#
+# @website http://dev.ryzom.com
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content, publishedDate
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+from datetime import datetime
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+base_url = 'http://dev.ryzom.com'
+search_url = '/search?wiki_pages=1&{query}'
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'q': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+    # parse results: Redmine renders them as a definition list, with titles
+    # in <dt> and descriptions in <dd>; walk the <dt>s and pair each with
+    # its <dd> by position
+    i = 0
+ for r in doc.xpath('//dl[@id="search-results"]/dt'):
+ try:
+ res_url = r.xpath('.//a/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//a'))
+
+ i = i + 1
+
+ y = 0
+
+
+ for s in doc.xpath('//dl[@id="search-results"]/dd'):
+ y = y + 1
+ if y == i:
+ content = extract_text(s.xpath('.//span[@class="description"]'))
+
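+                # the author span starts with the date; the splits below
+                # assume a mm/dd/yyyy format, matching the
+                # datetime(year, month, day) call further down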
+ dataBrut = extract_text(s.xpath('.//span[@class="author"]'))
+ data = dataBrut.split(' ')
+ date = data[0].split('/')
+
+
+                # append result (time-of-day is not parsed; midnight is used)
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url,
+ 'publishedDate': datetime(int(date[2]), int(date[0]), int(date[1]), 0, 0, 0)})
+
+ # return results
+ return results
diff --git a/forgeryzom.py b/forgeryzom.py
new file mode 100644
index 0000000..850ae43
--- /dev/null
+++ b/forgeryzom.py
@@ -0,0 +1,126 @@
+# Ryzom Forge wiki (MediaWiki)
+#
+# @website http://forge.ryzom.com
+# @provide-api yes (MediaWiki API)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content, publishedDate
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+from datetime import datetime
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+base_url = 'http://forge.ryzom.com'
+search_url = '/wiki/W/api.php?action=query'\
+ '&{query}'
+# TODO '&startRecord={offset}'\
+# TODO '&maximumRecords={limit}'\
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'search': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+    # parse results
+ for r in doc.xpath('//ul[@class="mw-search-results"]/li'):
+ try:
+ res_url = r.xpath('.//div[@class="mw-search-result-heading"]/a/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//div[@class="mw-search-result-heading"]/a/@title'))
+
+ content = extract_text(r.xpath('.//div[@class="searchresult"]'))
+
+        # the result metadata looks like e.g. "3 KB (125 words) - 13:18, 4 May 2016":
+        # keep the part after the dash, then after the comma, so the final
+        # split yields ['', day, monthName, year]
+        dataBrut = extract_text(r.xpath('.//div[@class="mw-search-result-data"]'))
+
+        adatetime = dataBrut.split('-')[1]
+        date = adatetime.split(',')[1]
+        Thedate = date.split(' ')
+
+
+        # map the English month name to its number; unknown names fall back
+        # to 12 (December), as in the original if/elif chain
+        months = {"January": 1, "February": 2, "March": 3, "April": 4,
+                  "May": 5, "June": 6, "July": 7, "August": 8,
+                  "September": 9, "October": 10, "November": 11}
+        ThedateMonth = months.get(Thedate[2], 12)
+
+        # append result; the fixed time 3:01:42 is an arbitrary placeholder,
+        # since the page only provides a date
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url,
+ 'publishedDate': datetime(int(Thedate[3]), ThedateMonth, int(Thedate[1]), 3, 1, 42)})
+
+ # return results
+ return results
diff --git a/khaganatForum.py b/khaganatForum.py
new file mode 100644
index 0000000..75cd0be
--- /dev/null
+++ b/khaganatForum.py
@@ -0,0 +1,131 @@
+# Khaganat forum (SMF)
+#
+# @website https://khaganat.net/forum
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+from datetime import datetime
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+base_url = 'https://khaganat.net'
+search_url = '/forum/index.php?action=search2&{query}'
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'search': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+    # parse results: one block per topic on the SMF search results page
+ for r in doc.xpath('//div[@id="main_content_section"]/div/div/div'):
+ try:
+ res_url = r.xpath('.//div[@class="topic_details floatleft"]/h5/a/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ #content = extract_text(r.xpath('.//div[@class="list_posts"]'))
+ title = extract_text(r.xpath('.//div[@class="topic_details floatleft"]/h5'))
+
+
+
+ #dataBrut = extract_text(r.xpath('.//span[@class="date"]'))
+
+ #data = dataBrut.split('-')
+
+
+
+ #date = '-'.join(dataS)
+ #adatetime = data[1]
+ #data = adatetime.split(',')
+ #date = data[1]
+ #Thedate = dataBrut.split(' ')
+ #TheDay = Thedate[1].split(',')
+
+
+ #if Thedate[0] == "Jan":
+ # ThedateMonth = 1
+ #elif Thedate[0] == "Feb":
+ # ThedateMonth = 2
+ #elif Thedate[0] == "Mar":
+ # ThedateMonth = 3
+ #elif Thedate[0] == "Apr":
+ # ThedateMonth = 4
+ #elif Thedate[0] == "May":
+ # ThedateMonth = 5
+ #elif Thedate[0] == "Jun":
+ # ThedateMonth = 6
+ #elif Thedate[0] == "Jul":
+ # ThedateMonth = 7
+ #elif Thedate[0] == "Aug":
+ # ThedateMonth = 8
+ #elif Thedate[0] == "Sep":
+ # ThedateMonth = 9
+ #elif Thedate[0] == "Oct":
+ # ThedateMonth = 10
+ #elif Thedate[0] == "Nov":
+ # ThedateMonth = 11
+ #else:
+ # ThedateMonth = 12
+
+ # append result
+ results.append({'title': title,
+ 'content': "",
+ 'url': base_url + res_url})
+ #'publishedDate': datetime(int(Thedate[2]), ThedateMonth, int(TheDay[0]), 3, 1, 42)})
+
+
+
+ # return results
+ return results
diff --git a/khaganatWikhan.py b/khaganatWikhan.py
new file mode 100644
index 0000000..083696c
--- /dev/null
+++ b/khaganatWikhan.py
@@ -0,0 +1,84 @@
+# Doku Wiki
+#
+# @website https://www.dokuwiki.org/
+# @provide-api yes
+# (https://www.dokuwiki.org/devel:xmlrpc)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+# Doku is OpenSearch compatible
+base_url = 'https://khaganat.net'
+search_url = '/wikhan/?do=search'\
+ '&{query}'
+# TODO '&startRecord={offset}'\
+# TODO '&maximumRecords={limit}'\
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'id': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+ # parse results
+ # Quickhits
+ for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+ try:
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+
+ # append result
+ results.append({'title': title,
+ 'content': "",
+ 'url': base_url + res_url})
+
+ # Search results
+ for r in doc.xpath('//dl[@class="search_results"]/*'):
+ try:
+ if r.tag == "dt":
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+ elif r.tag == "dd":
+ content = extract_text(r.xpath('.'))
+
+ # append result
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url})
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ # return results
+ return results
diff --git a/ryzomcoreConfluence.py b/ryzomcoreConfluence.py
new file mode 100644
index 0000000..5d35c9f
--- /dev/null
+++ b/ryzomcoreConfluence.py
@@ -0,0 +1,130 @@
+# Ryzom Core Confluence wiki
+#
+# @website https://ryzomcore.atlassian.net/wiki
+# @provide-api yes (Confluence REST API)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content, publishedDate
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+from datetime import datetime
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+base_url = 'https://ryzomcore.atlassian.net'
+search_url = '/wiki/dosearchsite.action?{query}'
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'queryString': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+    # parse results
+ for r in doc.xpath('//ol[@class="search-results cql"]/li'):
+ try:
+ res_url = r.xpath('.//a[@class="search-result-link visitable"]/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//a[@class="search-result-link visitable"]'))
+
+ content = extract_text(r.xpath('.//div[@class="highlights"]'))
+
+        # the date span looks like e.g. "Dec 01, 2016": split it into
+        # [monthAbbrev, "day,", year], then strip the comma from the day
+        dataBrut = extract_text(r.xpath('.//span[@class="date"]'))
+        Thedate = dataBrut.split(' ')
+        TheDay = Thedate[1].split(',')
+
+
+        # map the abbreviated month name to its number; unknown names fall
+        # back to 12 (Dec), as in the original if/elif chain
+        months = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6,
+                  "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11}
+        ThedateMonth = months.get(Thedate[0], 12)
+
+        # append result; the fixed time 3:01:42 is an arbitrary placeholder,
+        # since the page only provides a date
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url,
+ 'publishedDate': datetime(int(Thedate[2]), ThedateMonth, int(TheDay[0]), 3, 1, 42)})
+
+
+
+ # return results
+ return results
diff --git a/settings.yml b/settings.yml
new file mode 100644
index 0000000..f47a1d0
--- /dev/null
+++ b/settings.yml
@@ -0,0 +1,666 @@
+general:
+ debug : False # Debug mode, only for development
+ instance_name : "searx" # displayed name
+
+search:
+ safe_search : 0 # Filter results. 0: None, 1: Moderate, 2: Strict
+ autocomplete : "" # Existing autocomplete backends: "dbpedia", "duckduckgo", "google", "startpage", "wikipedia" - leave blank to turn it off by default
+ language : "all"
+
+server:
+ port : 8888
+ bind_address : "127.0.0.1" # address to listen on
+ secret_key : "e6a21d96debe828f1cad62074bec30a2" # change this!
+ base_url : False # Set custom base_url. Possible values: False or "https://your.custom.host/location/"
+ image_proxy : False # Proxying image results through searx
+
+ui:
+    themes_path : "" # Custom ui themes path - leave it blank if you didn't change it
+ default_theme : oscar # ui theme
+ default_locale : "" # Default interface locale - leave blank to detect from browser information or use codes from the 'locales' config section
+
+# searx supports result proxification using an external service: https://github.com/asciimoo/morty
+# uncomment the section below if you have a running morty proxy
+#result_proxy:
+# url : http://127.0.0.1:3000/
+# key : your_morty_proxy_key
+
+outgoing: # communication with search engines
+ request_timeout : 2.0 # seconds
+    useragent_suffix : "" # suffix of searx_useragent; may contain information such as an email address for the administrator
+ pool_connections : 100 # Number of different hosts
+ pool_maxsize : 10 # Number of simultaneous requests by host
+# uncomment the section below if you want to use a proxy
+# see http://docs.python-requests.org/en/latest/user/advanced/#proxies
+# SOCKS proxies are also supported: see http://docs.python-requests.org/en/master/user/advanced/#socks
+# proxies :
+# http : http://127.0.0.1:8080
+# https: http://127.0.0.1:8080
+# uncomment the section below only if you have more than one network interface
+# that can be the source of outgoing search requests
+# source_ips:
+# - 1.1.1.1
+# - 1.1.1.2
+
+engines:
+ - name : khaganat.net/wikhan
+ engine : khaganatWikhan
+ shortcut : wik
+
+ - name : ryzomcore.atlassian.net/wiki
+ engine : ryzomcoreConfluence
+ shortcut : rcc
+
+ - name : forge.ryzom.com
+ engine : forgeryzom
+ shortcut: fr
+
+ - name: dev.ryzom.com
+ engine: devryzom
+ shortcut: dr
+
+ - name: khaganat.net/forum
+ engine: khaganatForum
+ shortcut: kf
+
+ - name: ryzom.com/forum
+ engine: RyzomForum
+ shortcut: rc
+
+ - name : arch linux wiki
+ engine : archlinux
+ disabled : True
+ shortcut : al
+
+ - name : archive is
+ engine : xpath
+ search_url : https://archive.is/{query}
+ url_xpath : (//div[@class="TEXT-BLOCK"]/a)/@href
+ title_xpath : (//div[@class="TEXT-BLOCK"]/a)
+ content_xpath : //div[@class="TEXT-BLOCK"]/ul/li
+ categories : general
+ timeout : 7.0
+ disabled : True
+ shortcut : ai
+
+ - name : base
+ engine : base
+ shortcut : bs
+ disabled : True
+
+ - name : wikipedia
+ engine : wikipedia
+ shortcut : wp
+ base_url : 'https://{language}.wikipedia.org/'
+ disabled : True
+
+ - name : bing
+ engine : bing
+ shortcut : bi
+ disabled : True
+
+ - name : bing images
+ engine : bing_images
+ shortcut : bii
+ disabled : True
+
+ - name : bing news
+ engine : bing_news
+ shortcut : bin
+ disabled : True
+
+ - name : bitbucket
+ engine : xpath
+ paging : True
+ search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
+ url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
+ title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
+ content_xpath : //article[@class="repo-summary"]/p
+ categories : it
+ timeout : 4.0
+ shortcut : bb
+ disabled : True
+
+ - name : crossref
+ engine : json_engine
+ paging : True
+ search_url : http://search.crossref.org/dois?q={query}&page={pageno}
+ url_query : doi
+ title_query : title
+ content_query : fullCitation
+ categories : science
+ shortcut : cr
+ disabled : True
+
+ - name : currency
+ engine : currency_convert
+ categories : general
+ shortcut : cc
+ disabled : True
+
+ - name : deezer
+ engine : deezer
+ shortcut : dz
+ disabled : True
+
+ - name : deviantart
+ engine : deviantart
+ shortcut : da
+ timeout: 3.0
+ disabled : True
+
+ - name : ddg definitions
+ engine : duckduckgo_definitions
+ shortcut : ddd
+ weight : 2
+ disabled : True
+
+ - name : digbt
+ engine : digbt
+ shortcut : dbt
+ timeout : 6.0
+ disabled : True
+
+ - name : digg
+ engine : digg
+ shortcut : dg
+ disabled : True
+
+ - name : erowid
+ engine : xpath
+ paging : True
+ first_page_num : 0
+ page_size : 30
+ search_url : https://www.erowid.org/search.php?q={query}&s={pageno}
+ url_xpath : //dl[@class="results-list"]/dt[@class="result-title"]/a/@href
+ title_xpath : //dl[@class="results-list"]/dt[@class="result-title"]/a/text()
+ content_xpath : //dl[@class="results-list"]/dd[@class="result-details"]
+ categories : general
+ shortcut : ew
+ disabled : True
+
+ - name : wikidata
+ engine : wikidata
+ shortcut : wd
+ weight : 2
+ disabled : True
+
+ - name : duckduckgo
+ engine : duckduckgo
+ shortcut : ddg
+ disabled : True
+
+# api-key required: http://www.faroo.com/hp/api/api.html#key
+# - name : faroo
+# engine : faroo
+# shortcut : fa
+# api_key : 'apikey' # required!
+
+ - name : 500px
+ engine : www500px
+ shortcut : px
+ disabled : True
+
+ - name : 1x
+ engine : www1x
+ shortcut : 1x
+ disabled : True
+
+ - name : fdroid
+ engine : fdroid
+ shortcut : fd
+ disabled : True
+
+ - name : flickr
+ categories : images
+ shortcut : fl
+ disabled : True
+
+# You can use the official stable API, but you need an API key
+# See : https://www.flickr.com/services/apps/create/
+# engine : flickr
+# api_key: 'apikey' # required!
+# Or you can use the non-stable HTML engine, which is activated by default
+ engine : flickr_noapi
+
+ - name : frinkiac
+ engine : frinkiac
+ shortcut : frk
+ disabled : True
+
+ - name : gigablast
+ engine : gigablast
+ shortcut : gb
+ timeout : 3.0
+ disabled: True
+
+ - name : gitlab
+ engine : xpath
+ paging : True
+ search_url : https://gitlab.com/search?page={pageno}&search={query}
+ url_xpath : //li[@class="project-row"]//a[@class="project"]/@href
+ title_xpath : //li[@class="project-row"]//span[contains(@class, "project-full-name")]
+ content_xpath : //li[@class="project-row"]//div[@class="description"]/p
+ categories : it
+ shortcut : gl
+ timeout : 5.0
+ disabled : True
+
+ - name : github
+ engine : github
+ shortcut : gh
+ disabled : True
+
+ - name : google
+ engine : google
+ shortcut : go
+ disabled : True
+
+ - name : google images
+ engine : google_images
+ shortcut : goi
+ disabled : True
+
+ - name : google news
+ engine : google_news
+ shortcut : gon
+ disabled : True
+
+ - name : google scholar
+ engine : xpath
+ paging : True
+ search_url : https://scholar.google.com/scholar?start={pageno}&q={query}&hl=en&as_sdt=0,5&as_vis=1
+ results_xpath : //div[@class="gs_r"]/div[@class="gs_ri"]
+ url_xpath : .//h3/a/@href
+ title_xpath : .//h3/a
+ content_xpath : .//div[@class="gs_rs"]
+ suggestion_xpath : //div[@id="gs_qsuggest"]/ul/li
+ page_size : 10
+ first_page_num : 0
+ categories : science
+ shortcut : gos
+ disabled : True
+
+ - name : google play apps
+ engine : xpath
+ search_url : https://play.google.com/store/search?q={query}&c=apps
+ url_xpath : //a[@class="title"]/@href
+ title_xpath : //a[@class="title"]
+ content_xpath : //a[@class="subtitle"]
+ categories : files
+ shortcut : gpa
+ disabled : True
+
+ - name : google play movies
+ engine : xpath
+ search_url : https://play.google.com/store/search?q={query}&c=movies
+ url_xpath : //a[@class="title"]/@href
+ title_xpath : //a[@class="title"]/@title
+ content_xpath : //a[contains(@class, "subtitle")]
+ categories : videos
+ shortcut : gpm
+ disabled : True
+
+ - name : google play music
+ engine : xpath
+ search_url : https://play.google.com/store/search?q={query}&c=music
+ url_xpath : //a[@class="title"]/@href
+ title_xpath : //a[@class="title"]
+ content_xpath : //a[@class="subtitle"]
+ categories : music
+ shortcut : gps
+ disabled : True
+
+ - name : geektimes
+ engine : xpath
+ paging : True
+ search_url : https://geektimes.ru/search/page{pageno}/?q={query}
+ url_xpath : //div[@class="search_results"]//a[@class="post__title_link"]/@href
+ title_xpath : //div[@class="search_results"]//a[@class="post__title_link"]
+ content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
+ categories : it
+ timeout : 4.0
+ disabled : True
+ shortcut : gt
+
+ - name : habrahabr
+ engine : xpath
+ paging : True
+ search_url : https://habrahabr.ru/search/page{pageno}/?q={query}
+ url_xpath : //div[@class="search_results"]//a[contains(@class, "post__title_link")]/@href
+ title_xpath : //div[@class="search_results"]//a[contains(@class, "post__title_link")]
+ content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
+ categories : it
+ timeout : 4.0
+ disabled : True
+ shortcut : habr
+
+ - name : hoogle
+ engine : json_engine
+ paging : True
+ search_url : https://www.haskell.org/hoogle/?mode=json&hoogle={query}&start={pageno}
+ results_query : results
+ url_query : location
+ title_query : self
+ content_query : docs
+ page_size : 20
+ categories : it
+ shortcut : ho
+ disabled : True
+
+ - name : ina
+ engine : ina
+ shortcut : in
+ timeout : 6.0
+ disabled : True
+
+ - name: kickass
+ engine : kickass
+ shortcut : kc
+ timeout : 4.0
+ disabled : True
+
+ - name : lobste.rs
+ engine : xpath
+ search_url : https://lobste.rs/search?utf8=%E2%9C%93&q={query}&what=stories&order=relevance
+ results_xpath : //li[contains(@class, "story")]
+ url_xpath : .//span[@class="link"]/a/@href
+ title_xpath : .//span[@class="link"]/a
+ content_xpath : .//a[@class="domain"]
+ categories : it
+ shortcut : lo
+ disabled : True
+
+ - name : microsoft academic
+ engine : json_engine
+ paging : True
+ search_url : https://academic.microsoft.com/api/search/GetEntityResults?query=%40{query}%40&filters=&offset={pageno}&limit=8&correlationId=undefined
+ results_query : results
+ url_query : u
+ title_query : dn
+ content_query : d
+ page_size : 8
+ first_page_num : 0
+ categories : science
+ shortcut : ma
+ disabled : True
+
+ - name : mixcloud
+ engine : mixcloud
+ shortcut : mc
+ disabled : True
+
+ - name : nyaa
+ engine : nyaa
+ shortcut : nt
+ disabled : True
+
+ - name : openstreetmap
+ engine : openstreetmap
+ shortcut : osm
+ disabled : True
+
+ - name : openrepos
+ engine : xpath
+ paging : True
+ search_url : https://openrepos.net/search/node/{query}?page={pageno}
+ url_xpath : //li[@class="search-result"]//h3[@class="title"]/a/@href
+ title_xpath : //li[@class="search-result"]//h3[@class="title"]/a
+ content_xpath : //li[@class="search-result"]//div[@class="search-snippet-info"]//p[@class="search-snippet"]
+ categories : files
+ timeout : 4.0
+ disabled : True
+ shortcut : or
+
+ - name : pdbe
+ engine : pdbe
+ shortcut : pdb
+# Hide obsolete PDB entries.
+# Default is not to hide obsolete structures
+# hide_obsolete : False
+ disabled : True
+
+ - name : photon
+ engine : photon
+ shortcut : ph
+ disabled : True
+
+ - name : piratebay
+ engine : piratebay
+ shortcut : tpb
+ url: https://pirateproxy.red/
+ timeout : 3.0
+ disabled : True
+
+ - name : qwant
+ engine : qwant
+ shortcut : qw
+ categories : general
+ disabled : True
+
+ - name : qwant images
+ engine : qwant
+ shortcut : qwi
+ categories : images
+ disabled : True
+
+ - name : qwant news
+ engine : qwant
+ shortcut : qwn
+ categories : news
+ disabled : True
+
+ - name : qwant social
+ engine : qwant
+ shortcut : qws
+ categories : social media
+ disabled : True
+
+ - name : reddit
+ engine : reddit
+ shortcut : re
+ page_size : 25
+ timeout : 10.0
+ disabled : True
+
+ - name : scanr structures
+ shortcut: scs
+ engine : scanr_structures
+ disabled : True
+
+ - name : soundcloud
+ engine : soundcloud
+ shortcut : sc
+ disabled : True
+
+ - name : stackoverflow
+ engine : stackoverflow
+ shortcut : st
+ disabled : True
+
+ - name : searchcode doc
+ engine : searchcode_doc
+ shortcut : scd
+ disabled : True
+
+ - name : searchcode code
+ engine : searchcode_code
+ shortcut : scc
+ disabled : True
+
+ - name : spotify
+ engine : spotify
+ shortcut : stf
+ disabled : True
+
+ - name : subtitleseeker
+ engine : subtitleseeker
+ shortcut : ss
+# The language is optional. You can put any language name written in English
+# Examples : English, French, German, Hungarian, Chinese...
+# language : English
+ disabled : True
+
+ - name : startpage
+ engine : startpage
+ shortcut : sp
+ timeout : 6.0
+ disabled : True
+
+ - name : ixquick
+ engine : startpage
+ base_url : 'https://www.ixquick.eu/'
+ search_url : 'https://www.ixquick.eu/do/search'
+ shortcut : iq
+ timeout : 6.0
+ disabled : True
+
+ - name : swisscows
+ engine : swisscows
+ shortcut : sw
+ disabled : True
+
+ - name : tokyotoshokan
+ engine : tokyotoshokan
+ shortcut : tt
+ timeout : 6.0
+ disabled : True
+
+ - name : twitter
+ engine : twitter
+ shortcut : tw
+ disabled : True
+
+# maybe in a fun category
+# - name : uncyclopedia
+# engine : mediawiki
+# shortcut : unc
+# base_url : https://uncyclopedia.wikia.com/
+# number_of_results : 5
+
+# tmp suspended - too slow, too many errors
+# - name : urbandictionary
+# engine : xpath
+# search_url : http://www.urbandictionary.com/define.php?term={query}
+# url_xpath : //*[@class="word"]/@href
+# title_xpath : //*[@class="def-header"]
+# content_xpath : //*[@class="meaning"]
+# shortcut : ud
+
+ - name : yahoo
+ engine : yahoo
+ shortcut : yh
+ disabled : True
+
+ - name : yandex
+ engine : yandex
+ shortcut : yn
+ disabled : True
+
+ - name : yahoo news
+ engine : yahoo_news
+ shortcut : yhn
+ disabled : True
+
+ - name : youtube
+ shortcut : yt
+    # You can use the official stable API, but you need an API key
+ # See : https://console.developers.google.com/project
+ # engine : youtube_api
+ # api_key: 'apikey' # required!
+    # Or you can use the non-stable HTML engine, which is activated by default
+ engine : youtube_noapi
+ disabled : True
+
+ - name : dailymotion
+ engine : dailymotion
+ shortcut : dm
+ disabled : True
+
+ - name : vimeo
+ engine : vimeo
+ shortcut : vm
+ disabled : True
+
+ - name : wolframalpha
+ shortcut : wa
+    # You can use the official stable API, but you need an API key
+ # See : http://products.wolframalpha.com/api/
+ # engine : wolframalpha_api
+ # api_key: '' # required!
+ engine : wolframalpha_noapi
+ timeout: 6.0
+ categories : science
+ disabled : True
+
+ - name : seedpeer
+ engine : seedpeer
+ shortcut: speu
+ categories: files, music, videos
+ disabled: True
+
+ - name : dictzone
+ engine : dictzone
+ shortcut : dc
+ disabled : True
+
+ - name : mymemory translated
+ engine : translated
+ shortcut : tl
+ timeout : 5.0
+ disabled : True
+    # You can use it without an API key, but you are limited to 1000 words/day
+ # See : http://mymemory.translated.net/doc/usagelimits.php
+ # api_key : ''
+
+ - name : voat
+ engine: xpath
+ shortcut: vo
+ categories: social media
+ search_url : https://voat.co/search?q={query}
+ url_xpath : //p[contains(@class, "title")]/a/@href
+ title_xpath : //p[contains(@class, "title")]/a
+ content_xpath : //span[@class="domain"]
+ timeout : 10.0
+ disabled : True
+
+
+# The blekko technology and team have joined IBM Watson! -> https://blekko.com/
+# - name : blekko images
+# engine : blekko_images
+# locale : en-US
+# shortcut : bli
+
+# - name : yacy
+# engine : yacy
+# shortcut : ya
+# base_url : 'http://localhost:8090'
+# number_of_results : 5
+# timeout : 3.0
+
+# The Doku engine lets you access any DokuWiki instance:
+# a public one or a private/corporate one.
+# - name : ubuntuwiki
+# engine : doku
+# shortcut : uw
+# base_url : 'http://doc.ubuntu-fr.org'
+
+locales:
+ en : English
+ bg : Български (Bulgarian)
+ de : Deutsch (German)
+ el_GR : Ελληνικά (Greek_Greece)
+ eo : Esperanto (Esperanto)
+ es : Español (Spanish)
+ fr : Français (French)
+ he : עברית (Hebrew)
+ hu : Magyar (Hungarian)
+ it : Italiano (Italian)
+ ja : 日本語 (Japanese)
+ nl : Nederlands (Dutch)
+ pt : Português (Portuguese)
+ pt_BR : Português (Portuguese_Brazil)
+ ro : Română (Romanian)
+ ru : Русский (Russian)
+ tr : Türkçe (Turkish)
+ zh : 中文 (Chinese)