Diffstat (limited to 'searx/engines')
-rw-r--r-- | searx/engines/duckduckgo.py       | 18
-rw-r--r-- | searx/engines/mediathekviewweb.py | 68
-rw-r--r-- | searx/engines/seznam.py           | 64
-rw-r--r-- | searx/engines/wikipedia.py        | 11
4 files changed, 153 insertions, 8 deletions
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 7f1378264..638f1211b 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -5,7 +5,8 @@
 
 from lxml.html import fromstring
 from json import loads
-from searx.utils import extract_text, match_language, eval_xpath
+from searx.utils import extract_text, match_language, eval_xpath, dict_subset
+from searx.poolrequests import get
 
 # about
 about = {
@@ -35,6 +36,7 @@ language_aliases = {
 
 # search-url
 url = 'https://html.duckduckgo.com/html'
+url_ping = 'https://duckduckgo.com/t/sl_h'
 time_range_dict = {'day': 'd',
                    'week': 'w',
                    'month': 'm'}
@@ -65,27 +67,27 @@ def request(query, params):
     params['url'] = url
     params['method'] = 'POST'
 
-    params['data']['b'] = ''
     params['data']['q'] = query
-    params['data']['df'] = ''
+    params['data']['b'] = ''
 
     region_code = get_region_code(params['language'], supported_languages)
     if region_code:
         params['data']['kl'] = region_code
         params['cookies']['kl'] = region_code
-    if params['time_range'] in time_range_dict:
-        params['data']['df'] = time_range_dict[params['time_range']]
+    params['data']['df'] = time_range_dict.get(params['time_range'], '')
 
     return params
 
 
 # get response from search-request
 def response(resp):
-    results = []
+    # ping
+    headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
+    get(url_ping, headers=headers_ping)
 
+    # parse the response
+    results = []
     doc = fromstring(resp.text)
-
-    # parse results
     for i, r in enumerate(eval_xpath(doc, result_xpath)):
         if i >= 30:
             break
diff --git a/searx/engines/mediathekviewweb.py b/searx/engines/mediathekviewweb.py
new file mode 100644
index 000000000..fa442c937
--- /dev/null
+++ b/searx/engines/mediathekviewweb.py
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""MediathekViewWeb (API)
+
+"""
+
+# pylint: disable=missing-function-docstring
+
+import datetime
+from json import loads, dumps
+
+about = {
+    "website": 'https://mediathekviewweb.de/',
+    "wikidata_id": 'Q27877380',
+    "official_api_documentation": 'https://gist.github.com/bagbag/a2888478d27de0e989cf777f81fb33de',
+    "use_official_api": True,
+    "require_api_key": False,
+    "results": 'JSON',
+}
+
+categories = ['videos']
+paging = True
+time_range_support = False
+safesearch = False
+
+def request(query, params):
+
+    params['url'] = 'https://mediathekviewweb.de/api/query'
+    params['method'] = 'POST'
+    params['headers']['Content-type'] = 'text/plain'
+    params['data'] = dumps({
+        'queries' : [
+            {
+                'fields' : [
+                    'title',
+                    'topic',
+                ],
+                'query' : query
+            },
+        ],
+        'sortBy' : 'timestamp',
+        'sortOrder' : 'desc',
+        'future' : True,
+        'offset' : (params['pageno'] - 1 )* 10,
+        'size' : 10
+    })
+    return params
+
+def response(resp):
+
+    resp = loads(resp.text)
+
+    mwv_result = resp['result']
+    mwv_result_list = mwv_result['results']
+
+    results = []
+
+    for item in mwv_result_list:
+
+        item['hms'] = str(datetime.timedelta(seconds=item['duration']))
+
+        results.append({
+            'url' : item['url_video_hd'],
+            'title' : "%(channel)s: %(title)s (%(hms)s)" % item,
+            'length' : item['hms'],
+            'content' : "%(description)s" % item,
+        })
+
+    return results
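The new engine is a thin wrapper around a single POST endpoint, so the same query is easy to reproduce outside searx. Below is a minimal standalone sketch using the requests package; the mvw_search helper and its defaults are illustrative additions, and only the endpoint, the payload shape and the text/plain content type are taken from the engine code above.

# Standalone sketch of the query the engine sends; mvw_search and its
# defaults are illustrative, endpoint and payload mirror the engine above.
import json
import requests

def mvw_search(query, page=1, size=10):
    payload = {
        'queries': [{'fields': ['title', 'topic'], 'query': query}],
        'sortBy': 'timestamp',
        'sortOrder': 'desc',
        'future': True,
        'offset': (page - 1) * size,
        'size': size,
    }
    # the API parses a JSON body but is queried with a text/plain content type
    resp = requests.post('https://mediathekviewweb.de/api/query',
                         data=json.dumps(payload),
                         headers={'Content-type': 'text/plain'})
    resp.raise_for_status()
    return resp.json()['result']['results']

for item in mvw_search('tagesschau'):
    print('%(channel)s: %(title)s (%(duration)ss)' % item)

Note that the engine serializes the body itself with dumps() rather than letting the HTTP client send application/json, which matches the Content-type header it sets.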
diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
new file mode 100644
index 000000000..1df92a845
--- /dev/null
+++ b/searx/engines/seznam.py
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""
+ Seznam
+"""
+
+from urllib.parse import urlencode, urlparse
+from lxml import html
+from searx.poolrequests import get
+from searx.exceptions import SearxEngineAccessDeniedException
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
+
+# about
+about = {
+    "website": "https://www.seznam.cz/",
+    "wikidata_id": "Q3490485",
+    "official_api_documentation": "https://api.sklik.cz/",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+}
+
+base_url = 'https://search.seznam.cz/'
+
+
+def request(query, params):
+    response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
+    dom = html.fromstring(response_index.text)
+
+    url_params = {'q': query}
+    for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
+        name = e.get('name')
+        value = e.get('value')
+        url_params[name] = value
+
+    params['url'] = base_url + '?' + urlencode(url_params)
+    params['cookies'] = response_index.cookies
+    return params
+
+
+def response(resp):
+    resp_url = urlparse(resp.url)
+    if resp_url.path.startswith('/verify'):
+        raise SearxEngineAccessDeniedException()
+
+    results = []
+
+    dom = html.fromstring(resp.content.decode())
+    for result_element in eval_xpath_list(dom, '//div[@id="searchpage-root"]//div[@data-dot="results"]/div'):
+        dot_data = eval_xpath_getindex(result_element, './div/div[@data-dot-data]/@data-dot-data', 0, default=None)
+        if dot_data is None:
+            title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
+            results.append({
+                'url': title_element.get('href'),
+                'title': extract_text(title_element),
+                'content': extract_text(eval_xpath_getindex(title_element, '../../div[2]', 0)),
+            })
+        elif dot_data == '{"reporter_name":"hint/related/relates"}':
+            suggestions_element = eval_xpath_getindex(result_element,
+                                                      './div/div[@data-dot="main-box"]', 0, default=None)
+            if suggestions_element is not None:
+                for suggestion in eval_xpath_list(suggestions_element, './/ul/li'):
+                    results.append({'suggestion': extract_text(suggestion)})
+
+    return results
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index c8e589e64..2adfefa69 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -56,6 +56,17 @@ def request(query, params):
 def response(resp):
     if resp.status_code == 404:
         return []
+
+    if resp.status_code == 400:
+        try:
+            api_result = loads(resp.text)
+        except:
+            pass
+        else:
+            if api_result['type'] == 'https://mediawiki.org/wiki/HyperSwitch/errors/bad_request' \
+               and api_result['detail'] == 'title-invalid-characters':
+                return []
+
     raise_for_httperror(resp)
 
     results = []
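The new 400 branch in wikipedia.py is deliberately narrow: it only swallows the REST API's 'title-invalid-characters' bad request and turns it into an empty result list, while every other 400 still falls through to raise_for_httperror. A minimal sketch of that decision logic in isolation; the helper name and the sample payload are illustrative, and only the type and detail values come from the diff above.

# Illustrative re-statement of the 400 handling added above; the helper
# name and the sample payload are assumptions, the type/detail values are not.
from json import loads

BAD_REQUEST = 'https://mediawiki.org/wiki/HyperSwitch/errors/bad_request'

def is_invalid_title_error(status_code, text):
    # Only a 400 whose JSON body names the known 'bad title' error is
    # treated as 'no results'; anything else stays an error.
    if status_code != 400:
        return False
    try:
        api_result = loads(text)
    except ValueError:
        return False
    return (api_result.get('type') == BAD_REQUEST
            and api_result.get('detail') == 'title-invalid-characters')

sample = '{"type": "%s", "detail": "title-invalid-characters"}' % BAD_REQUEST
assert is_invalid_title_error(400, sample)
assert not is_invalid_title_error(400, 'not json')
assert not is_invalid_title_error(503, sample)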