-rw-r--r--  Makefile                             3
-rw-r--r--  searx/engines/duckduckgo.py         18
-rw-r--r--  searx/engines/mediathekviewweb.py   68
-rw-r--r--  searx/engines/seznam.py             64
-rw-r--r--  searx/poolrequests.py               12
-rw-r--r--  searx/search/processors/online.py    2
-rw-r--r--  searx/settings.yml                  22
7 files changed, 157 insertions(+), 32 deletions(-)
diff --git a/Makefile b/Makefile
index d148581dd..13f965b6c 100644
--- a/Makefile
+++ b/Makefile
@@ -193,7 +193,8 @@ PYLINT_FILES=\
 	searx/engines/google.py \
 	searx/engines/google_news.py \
 	searx/engines/google_videos.py \
-	searx/engines/google_images.py
+	searx/engines/google_images.py \
+	searx/engines/mediathekviewweb.py
 
 test.pylint: pyenvinstall
 	$(call cmd,pylint,$(PYLINT_FILES))
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 7f1378264..638f1211b 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -5,7 +5,8 @@
 
 from lxml.html import fromstring
 from json import loads
-from searx.utils import extract_text, match_language, eval_xpath
+from searx.utils import extract_text, match_language, eval_xpath, dict_subset
+from searx.poolrequests import get
 
 # about
 about = {
@@ -35,6 +36,7 @@ language_aliases = {
 
 # search-url
 url = 'https://html.duckduckgo.com/html'
+url_ping = 'https://duckduckgo.com/t/sl_h'
 time_range_dict = {'day': 'd',
                    'week': 'w',
                    'month': 'm'}
@@ -65,27 +67,27 @@ def request(query, params):
 
     params['url'] = url
     params['method'] = 'POST'
-    params['data']['b'] = ''
     params['data']['q'] = query
-    params['data']['df'] = ''
+    params['data']['b'] = ''
 
     region_code = get_region_code(params['language'], supported_languages)
    if region_code:
         params['data']['kl'] = region_code
         params['cookies']['kl'] = region_code
-    if params['time_range'] in time_range_dict:
-        params['data']['df'] = time_range_dict[params['time_range']]
+    params['data']['df'] = time_range_dict.get(params['time_range'], '')
 
     return params
 
 
 # get response from search-request
 def response(resp):
-    results = []
+    # ping
+    headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
+    get(url_ping, headers=headers_ping)
+    # parse the response
+    results = []
     doc = fromstring(resp.text)
-
-    # parse results
     for i, r in enumerate(eval_xpath(doc, result_xpath)):
         if i >= 30:
             break
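
Two behavior changes to note in this hunk: `df` is now always present in the POST data (an empty string when no time range is selected), and `response()` first replays a browser-like ping to `url_ping` before parsing, forwarding only a whitelist of the original request headers. A minimal standalone sketch of what `dict_subset` does (a re-implementation for illustration, not the actual `searx.utils` source):

    def dict_subset(dictionary, properties):
        # keep only the requested keys that are actually present
        return {k: dictionary[k] for k in properties if k in dictionary}

    headers = {'User-Agent': 'Mozilla/5.0', 'Accept': 'text/html', 'Referer': 'https://example.org'}
    print(dict_subset(headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie']))
    # {'User-Agent': 'Mozilla/5.0', 'Accept': 'text/html'}
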
diff --git a/searx/engines/mediathekviewweb.py b/searx/engines/mediathekviewweb.py
new file mode 100644
index 000000000..fa442c937
--- /dev/null
+++ b/searx/engines/mediathekviewweb.py
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""MediathekViewWeb (API)
+
+"""
+
+# pylint: disable=missing-function-docstring
+
+import datetime
+from json import loads, dumps
+
+about = {
+    "website": 'https://mediathekviewweb.de/',
+    "wikidata_id": 'Q27877380',
+    "official_api_documentation": 'https://gist.github.com/bagbag/a2888478d27de0e989cf777f81fb33de',
+    "use_official_api": True,
+    "require_api_key": False,
+    "results": 'JSON',
+}
+
+categories = ['videos']
+paging = True
+time_range_support = False
+safesearch = False
+
+def request(query, params):
+
+    params['url'] = 'https://mediathekviewweb.de/api/query'
+    params['method'] = 'POST'
+    params['headers']['Content-type'] = 'text/plain'
+    params['data'] = dumps({
+        'queries' : [
+            {
+                'fields' : [
+                    'title',
+                    'topic',
+                ],
+                'query' : query
+            },
+        ],
+        'sortBy' : 'timestamp',
+        'sortOrder' : 'desc',
+        'future' : True,
+        'offset' : (params['pageno'] - 1) * 10,
+        'size' : 10
+    })
+    return params
+
+def response(resp):
+
+    resp = loads(resp.text)
+
+    mwv_result = resp['result']
+    mwv_result_list = mwv_result['results']
+
+    results = []
+
+    for item in mwv_result_list:
+
+        item['hms'] = str(datetime.timedelta(seconds=item['duration']))
+
+        results.append({
+            'url' : item['url_video_hd'],
+            'title' : "%(channel)s: %(title)s (%(hms)s)" % item,
+            'length' : item['hms'],
+            'content' : "%(description)s" % item,
+        })
+
+    return results
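
The engine serializes its query as JSON but posts it with a `text/plain` content type, which is what the MediathekViewWeb API expects. A standalone sketch of the same exchange using `requests` (the query string `'tagesschau'` is just an example; the field names are the ones used above):

    import requests
    from json import dumps

    body = dumps({
        'queries': [{'fields': ['title', 'topic'], 'query': 'tagesschau'}],
        'sortBy': 'timestamp',
        'sortOrder': 'desc',
        'future': True,
        'offset': 0,   # page 1
        'size': 10,
    })
    resp = requests.post('https://mediathekviewweb.de/api/query',
                         data=body, headers={'Content-type': 'text/plain'})
    for item in resp.json()['result']['results']:
        print(item['channel'], '-', item['title'])
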
diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
new file mode 100644
index 000000000..1df92a845
--- /dev/null
+++ b/searx/engines/seznam.py
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""
+ Seznam
+"""
+
+from urllib.parse import urlencode, urlparse
+from lxml import html
+from searx.poolrequests import get
+from searx.exceptions import SearxEngineAccessDeniedException
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
+
+# about
+about = {
+    "website": "https://www.seznam.cz/",
+    "wikidata_id": "Q3490485",
+    "official_api_documentation": "https://api.sklik.cz/",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+}
+
+base_url = 'https://search.seznam.cz/'
+
+
+def request(query, params):
+    response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
+    dom = html.fromstring(response_index.text)
+
+    url_params = {'q': query}
+    for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
+        name = e.get('name')
+        value = e.get('value')
+        url_params[name] = value
+
+    params['url'] = base_url + '?' + urlencode(url_params)
+    params['cookies'] = response_index.cookies
+    return params
+
+
+def response(resp):
+    resp_url = urlparse(resp.url)
+    if resp_url.path.startswith('/verify'):
+        raise SearxEngineAccessDeniedException()
+
+    results = []
+
+    dom = html.fromstring(resp.content.decode())
+    for result_element in eval_xpath_list(dom, '//div[@id="searchpage-root"]//div[@data-dot="results"]/div'):
+        dot_data = eval_xpath_getindex(result_element, './div/div[@data-dot-data]/@data-dot-data', 0, default=None)
+        if dot_data is None:
+            title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
+            results.append({
+                'url': title_element.get('href'),
+                'title': extract_text(title_element),
+                'content': extract_text(eval_xpath_getindex(title_element, '../../div[2]', 0)),
+            })
+        elif dot_data == '{"reporter_name":"hint/related/relates"}':
+            suggestions_element = eval_xpath_getindex(result_element,
+                                                      './div/div[@data-dot="main-box"]', 0, default=None)
+            if suggestions_element is not None:
+                for suggestion in eval_xpath_list(suggestions_element, './/ul/li'):
+                    results.append({'suggestion': extract_text(suggestion)})
+
+    return results
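
Unlike the xpath configuration it replaces (see the settings.yml hunk below), this engine first fetches the Seznam landing page and carries every hidden form field, plus the issued cookies, into the actual search request, which is what keeps it off the `/verify` bot-check path. The hidden-field harvesting in isolation, using plain lxml instead of the searx helpers (the `sId` field is a made-up example):

    from lxml import html

    def hidden_fields(page_text):
        dom = html.fromstring(page_text)
        # collect name/value pairs from every hidden <input>
        return {e.get('name'): e.get('value')
                for e in dom.xpath('//input[@type="hidden"]')
                if e.get('name') is not None}

    print(hidden_fields('<form><input type="hidden" name="sId" value="abc"/></form>'))
    # {'sId': 'abc'}
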
diff --git a/searx/poolrequests.py b/searx/poolrequests.py
index 25a6baed9..8b8681437 100644
--- a/searx/poolrequests.py
+++ b/searx/poolrequests.py
@@ -1,7 +1,7 @@
 import sys
 from time import time
 from itertools import cycle
-from threading import RLock, local
+from threading import local
 
 import requests
 
@@ -88,10 +88,12 @@ class SessionSinglePool(requests.Session):
         super().__init__()
 
         # reuse the same adapters
-        with RLock():
-            self.adapters.clear()
-            self.mount('https://', next(https_adapters))
-            self.mount('http://', next(http_adapters))
+        self.adapters.clear()
+
+        https_adapter = threadLocal.__dict__.setdefault('https_adapter', next(https_adapters))
+        http_adapter = threadLocal.__dict__.setdefault('http_adapter', next(http_adapters))
+        self.mount('https://', https_adapter)
+        self.mount('http://', http_adapter)
 
     def close(self):
         """Call super, but clear adapters since they are managed globally"""
diff --git a/searx/search/processors/online.py b/searx/search/processors/online.py
index d79edd542..0cc175e1b 100644
--- a/searx/search/processors/online.py
+++ b/searx/search/processors/online.py
@@ -77,7 +77,7 @@ class OnlineProcessor(EngineProcessor):
soft_max_redirects = params.get('soft_max_redirects', max_redirects or 0)
# raise_for_status
- request_args['raise_for_httperror'] = params.get('raise_for_httperror', False)
+ request_args['raise_for_httperror'] = params.get('raise_for_httperror', True)
# specific type of request (GET or POST)
if params['method'] == 'GET':
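
With the default flipped to True, the online processor now raises on HTTP error responses for every engine that does not say otherwise, instead of requiring each engine to opt in. An engine that wants to parse error pages itself would opt out in its request() hook; a hypothetical sketch:

    from urllib.parse import urlencode

    def request(query, params):
        # hypothetical engine that parses 4xx answer pages itself,
        # so it restores the old non-raising behavior for its requests
        params['url'] = 'https://example.org/search?' + urlencode({'q': query})
        params['raise_for_httperror'] = False
        return params
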
diff --git a/searx/settings.yml b/searx/settings.yml
index 20b6c18c1..4e926d73c 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -1170,24 +1170,8 @@ engines:
 
   - name : seznam
     shortcut: szn
-    engine: xpath
-    paging : True
-    search_url : https://search.seznam.cz/?q={query}&count=10&from={pageno}
-    results_xpath: //div[@class="Page-content"]//div[contains(@class, "Result ")]
-    url_xpath : ./h3/a/@href
-    title_xpath : ./h3
-    content_xpath : .//p[@class="Result-description"]
-    suggestion_xpath: //div[@class="Related-container"]//div[@class="RelatedItem"]/div/span/a
-    first_page_num : 0
-    page_size : 10
+    engine: seznam
     disabled : True
-    about:
-      website: https://www.seznam.cz/
-      wikidata_id: Q3490485
-      official_api_documentation: https://api.sklik.cz/
-      use_official_api: false
-      require_api_key: false
-      results: HTML
 
   - name : mojeek
     shortcut: mjk
@@ -1258,6 +1242,10 @@ engines:
     categories: videos
     disabled : True
 
+  - name : mediathekviewweb
+    engine : mediathekviewweb
+    shortcut : mvw
+
 # - name : yacy
 #   engine : yacy
 #   shortcut : ya