# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""The XPath engine is a *generic* engine with which it is possible to configure
engines in the settings.

Here is a simple example of an XPath engine configured in the
:ref:`settings engine` section; for further details read :ref:`engines-dev`.

.. code:: yaml

  - name : bitbucket
    engine : xpath
    paging : True
    search_url : https://bitbucket.org/repo/all/{pageno}?name={query}
    url_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]/@href
    title_xpath : //article[@class="repo-summary"]//a[@class="repo-link"]
    content_xpath : //article[@class="repo-summary"]/p

"""

from urllib.parse import urlencode

from lxml import html
from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
from searx.network import raise_for_httperror

search_url = None
"""
Search URL of the engine. Example::

    https://example.org/?search={query}&page={pageno}{time_range}{safe_search}

Replacements are:

``{query}``:
  Search terms from user.

``{pageno}``:
  Page number if the engine supports paging (:py:obj:`paging`).

``{lang}``:
  ISO 639-1 language code (en, de, fr ..)

``{time_range}``:
  :py:obj:`URL parameter <time_range_url>` if engine :py:obj:`supports time
  range <time_range_support>`.  The value for the parameter is taken from
  :py:obj:`time_range_map`.

``{safe_search}``:
  Safe-search :py:obj:`URL parameter <safe_search_map>` if engine
  :py:obj:`supports safe-search <safe_search_support>`.  The ``{safe_search}``
  replacement is taken from the :py:obj:`safe_search_map`.  Filter results::

      0: none, 1: moderate, 2:strict

  If not supported, the URL parameter is an empty string.

"""

lang_all = 'en'
'''Replacement ``{lang}`` in :py:obj:`search_url` if language ``all`` is
selected.
'''

no_result_for_http_status = []
'''Return empty result for these HTTP status codes instead of throwing an error.

.. code:: yaml

    no_result_for_http_status: []
'''

soft_max_redirects = 0
'''Maximum redirects, soft limit. Record an error but don't stop the engine'''

results_xpath = ''
'''XPath selector for the list of result items.  If empty, the ``url``, ``title``
and ``content`` selectors are evaluated on the whole page instead of on each
result item.'''

url_xpath = None
'''XPath selector of result's ``url``.'''

content_xpath = None
'''XPath selector of result's ``content``.'''

title_xpath = None
'''XPath selector of result's ``title``.'''

thumbnail_xpath = False
'''XPath selector of result's ``img_src``.'''

suggestion_xpath = ''
'''XPath selector of result's ``suggestion``.'''

cached_xpath = ''
'''XPath selector of the text that is appended to :py:obj:`cached_url` to build
the result's ``cached_url``.'''

cached_url = ''
'''Prefix of the result's ``cached_url``; the text selected by
:py:obj:`cached_xpath` is appended to it.'''

cookies = {}
headers = {}
'''Some engines might return different results depending on cookies or headers.
A possible use-case is to set a safe-search cookie or header to moderate.
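
A hypothetical example (the header and cookie names depend on the target site):

.. code:: yaml

    headers:
      Accept-Language: 'en-US,en;q=0.5'
    cookies:
      safesearch: 'moderate'
'''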

paging = False
'''Engine supports paging [True or False].'''

page_size = 1
'''Number of results on each page.  Only needed if the site expects an offset
rather than a page number.
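
A hypothetical example of an offset-based site: with 10 results per page and the
first result at offset 0, searx page *n* becomes the offset
``(n - 1) * page_size + first_page_num``:

.. code:: yaml

    search_url: https://example.org/?q={query}&start={pageno}
    page_size: 10
    first_page_num: 0
'''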

first_page_num = 1
'''Number of the first page (usually 0 or 1).'''

time_range_support = False
'''Engine supports search time range.'''

time_range_url = '&hours={time_range_val}'
'''Time range URL parameter in the :py:obj:`search_url`.  If no time range is
requested by the user, the URL parameter is an empty string.  The
``{time_range_val}`` replacement is taken from the :py:obj:`time_range_map`.

.. code:: yaml

    time_range_url : '&days={time_range_val}'
'''

time_range_map = {
    'day': 24,
    'week': 24 * 7,
    'month': 24 * 30,
    'year': 24 * 365,
}
'''Maps time range value from user to ``{time_range_val}`` in
:py:obj:`time_range_url`.

.. code:: yaml

    time_range_map:
      day: 1
      week: 7
      month: 30
      year: 365
'''

safe_search_support = False
'''Engine supports safe-search.'''

safe_search_map = {0: '&filter=none', 1: '&filter=moderate', 2: '&filter=strict'}
'''Maps safe-search value to ``{safe_search}`` in :py:obj:`search_url`.

.. code:: yaml

    safesearch: true
    safe_search_map:
      0: '&filter=none'
      1: '&filter=moderate'
      2: '&filter=strict'

'''


def request(query, params):
    '''Build request parameters (see :ref:`engine request`).'''
    lang = lang_all
    if params['language'] != 'all':
        lang = params['language'][:2]

    time_range = ''
    if params.get('time_range'):
        time_range_val = time_range_map.get(params.get('time_range'))
        time_range = time_range_url.format(time_range_val=time_range_val)

    safe_search = ''
    if params['safesearch']:
        safe_search = safe_search_map[params['safesearch']]

    fargs = {
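        # urlencode({'q': query}) yields "q=<encoded query>"; the [2:] strips the
        # leading "q=" so only the URL-encoded query is inserted into search_url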
        'query': urlencode({'q': query})[2:],
        'lang': lang,
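        # map searx's 1-based page number to the engine's page number or, when
        # page_size > 1, to a result offset starting at first_page_num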
        'pageno': (params['pageno'] - 1) * page_size + first_page_num,
        'time_range': time_range,
        'safe_search': safe_search,
    }

    params['cookies'].update(cookies)
    params['headers'].update(headers)

    params['url'] = search_url.format(**fargs)
    params['soft_max_redirects'] = soft_max_redirects

    params['raise_for_httperror'] = False

    return params


def response(resp):  # pylint: disable=too-many-branches
    '''Scrape *results* from the response (see :ref:`engine results`).'''
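    # request() disables raise_for_httperror, so HTTP errors are handled here:
    # status codes listed in no_result_for_http_status yield an empty result list,
    # other error responses are raised by raise_for_httperror() below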
    if no_result_for_http_status and resp.status_code in no_result_for_http_status:
        return []

    raise_for_httperror(resp)

    results = []
    dom = html.fromstring(resp.text)
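    # `categories` (used here) and `logger` (used at the end) are not defined in
    # this module; searx's engine loader sets them as module attributes when the
    # engine is registered from the settings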
    is_onion = 'onions' in categories

    if results_xpath:
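        # each node matched by results_xpath is one result item; the url, title
        # and content selectors are evaluated relative to that node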
        for result in eval_xpath_list(dom, results_xpath):

            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
            content = extract_text(eval_xpath_list(result, content_xpath))
            tmp_result = {'url': url, 'title': title, 'content': content}

            # add thumbnail if available
            if thumbnail_xpath:
                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                if len(thumbnail_xpath_result) > 0:
                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

            # add alternative cached url if available
            if cached_xpath:
                tmp_result['cached_url'] = cached_url + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))

            if is_onion:
                tmp_result['is_onion'] = True

            results.append(tmp_result)

    else:
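        # without results_xpath the selectors are evaluated against the whole
        # page and the per-field result lists are zipped together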
        if cached_xpath:
            for url, title, content, cached in zip(
                (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
                map(extract_text, eval_xpath_list(dom, title_xpath)),
                map(extract_text, eval_xpath_list(dom, content_xpath)),
                map(extract_text, eval_xpath_list(dom, cached_xpath)),
            ):
                results.append(
                    {
                        'url': url,
                        'title': title,
                        'content': content,
                        'cached_url': cached_url + cached,
                        'is_onion': is_onion,
                    }
                )
        else:
            for url, title, content in zip(
                (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
                map(extract_text, eval_xpath_list(dom, title_xpath)),
                map(extract_text, eval_xpath_list(dom, content_xpath)),
            ):
                results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})

    if suggestion_xpath:
        for suggestion in eval_xpath(dom, suggestion_xpath):
            results.append({'suggestion': extract_text(suggestion)})

    logger.debug("found %s results", len(results))
    return results