aboutsummaryrefslogtreecommitdiff
path: root/src/forgeryzom.py
blob: 76aa50ac05c8059a1095bd06dcccd26960165bfa (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
# Ryzom Wiki (MediaWiki)
#
# @website     https://en.wiki.ryzom.com/
# @provide-api yes
#              (https://www.mediawiki.org/wiki/API:Main_page)
#
# @using-api   no
# @results     HTML
# @stable      yes
# @parse       (general)    url, title, content

from urllib import urlencode
from lxml.html import fromstring
from searx.engines.xpath import extract_text
from datetime import datetime

# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False  # the engine requests only the first result page
language_support = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
# NOTE(review): despite the comment above, the scraping below targets
# MediaWiki markup (mw-search-results, /w/index.php) — confirm and rename.
base_url = 'https://en.wiki.ryzom.com'
search_url = '/w/index.php?{query}'  # {query} is filled with urlencode({'search': ...})
# TODO             '&startRecord={offset}'\
# TODO             '&maximumRecords={limit}'\


# do search-request
# do search-request
def request(query, params):
    """Fill in the search URL for *query* and return the params dict.

    The query string is URL-encoded as the MediaWiki ``search``
    parameter and appended to the engine's base URL.
    """
    encoded_query = urlencode({'search': query})
    params['url'] = base_url + search_url.format(query=encoded_query)

    return params


# get response from search-request
# Month-name -> month-number lookup for parsing result dates.
# Any unrecognised month name falls back to December (12), which
# preserves the original elif-chain's ``else`` behaviour.
_MONTH_NUMBERS = {
    'January': 1, 'February': 2, 'March': 3, 'April': 4,
    'May': 5, 'June': 6, 'July': 7, 'August': 8,
    'September': 9, 'October': 10, 'November': 11,
}


# get response from search-request
def response(resp):
    """Parse the MediaWiki search results page into searx result dicts.

    Each result carries ``url``, ``title``, ``content`` and a
    ``publishedDate`` reconstructed from the result's
    ``mw-search-result-data`` line. Results whose date line does not
    match the expected layout are skipped.
    """
    results = []

    doc = fromstring(resp.text)

    # parse results
    for result in doc.xpath('//ul[@class="mw-search-results"]/li'):
        try:
            res_url = result.xpath('.//div[@class="mw-search-result-heading"]/a/@href')[-1]
            title = extract_text(result.xpath('.//div[@class="mw-search-result-heading"]/a/@title'))
            content = extract_text(result.xpath('.//div[@class="searchresult"]'))

            # The data line looks like "... - 15:04, 12 January 2016 ...":
            # take the piece after the dash, then the piece after the comma,
            # which is " day month year".
            raw_data = extract_text(result.xpath('.//div[@class="mw-search-result-data"]'))
            date_text = raw_data.split('-')[1].split(',')[1]
            # leading space means split(' ') yields ['', day, month, year]
            date_parts = date_text.split(' ')

            month = _MONTH_NUMBERS.get(date_parts[2], 12)
            # The page gives no usable time of day; 03:01:42 is the
            # engine's historical placeholder, kept for compatibility.
            published = datetime(int(date_parts[3]), month, int(date_parts[1]), 3, 1, 42)

            # append result
            results.append({'title': title,
                            'content': content,
                            'url': base_url + res_url,
                            'publishedDate': published})

        except (IndexError, ValueError):
            # malformed entry (missing link or unparsable date) — skip it
            # rather than aborting the whole result page
            continue

    # return results
    return results