Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/1337x.py | 16
-rw-r--r--  searx/engines/__init__.py | 28
-rw-r--r--  searx/engines/ahmia.py | 11
-rw-r--r--  searx/engines/apkmirror.py | 10
-rw-r--r--  searx/engines/archlinux.py | 5
-rw-r--r--  searx/engines/artic.py | 36
-rw-r--r--  searx/engines/arxiv.py | 14
-rw-r--r--  searx/engines/bandcamp.py | 4
-rwxr-xr-x  searx/engines/base.py | 23
-rw-r--r--  searx/engines/bing.py | 39
-rw-r--r--  searx/engines/bing_images.py | 51
-rw-r--r--  searx/engines/bing_news.py | 36
-rw-r--r--  searx/engines/bing_videos.py | 43
-rw-r--r--  searx/engines/btdigg.py | 26
-rw-r--r--  searx/engines/ccengine.py | 17
-rw-r--r--  searx/engines/command.py | 6
-rw-r--r--  searx/engines/core.py | 28
-rw-r--r--  searx/engines/currency_convert.py | 5
-rw-r--r--  searx/engines/dailymotion.py | 28
-rw-r--r--  searx/engines/deezer.py | 18
-rw-r--r--  searx/engines/demo_offline.py | 14
-rw-r--r--  searx/engines/demo_online.py | 37
-rw-r--r--  searx/engines/deviantart.py | 22
-rw-r--r--  searx/engines/dictzone.py | 16
-rw-r--r--  searx/engines/digbt.py | 20
-rw-r--r--  searx/engines/docker_hub.py | 24
-rw-r--r--  searx/engines/doku.py | 14
-rw-r--r--  searx/engines/duckduckgo.py | 27
-rw-r--r--  searx/engines/duckduckgo_definitions.py | 111
-rw-r--r--  searx/engines/duckduckgo_images.py | 31
-rw-r--r--  searx/engines/duden.py | 12
-rw-r--r--  searx/engines/dummy-offline.py | 8
-rw-r--r--  searx/engines/ebay.py | 23
-rw-r--r--  searx/engines/elasticsearch.py | 6
-rw-r--r--  searx/engines/etools.py | 9
-rw-r--r--  searx/engines/fdroid.py | 12
-rw-r--r--  searx/engines/flickr.py | 37
-rw-r--r--  searx/engines/flickr_noapi.py | 24
-rw-r--r--  searx/engines/framalibre.py | 10
-rw-r--r--  searx/engines/freesound.py | 3
-rw-r--r--  searx/engines/frinkiac.py | 22
-rw-r--r--  searx/engines/gentoo.py | 17
-rw-r--r--  searx/engines/gigablast.py | 18
-rw-r--r--  searx/engines/github.py | 4
-rw-r--r--  searx/engines/google.py | 163
-rw-r--r--  searx/engines/google_images.py | 80
-rw-r--r--  searx/engines/google_news.py | 52
-rw-r--r--  searx/engines/google_scholar.py | 52
-rw-r--r--  searx/engines/google_videos.py | 76
-rw-r--r--  searx/engines/imdb.py | 30
-rw-r--r--  searx/engines/ina.py | 20
-rw-r--r--  searx/engines/invidious.py | 16
-rw-r--r--  searx/engines/json_engine.py | 30
-rw-r--r--  searx/engines/kickass.py | 27
-rw-r--r--  searx/engines/loc.py | 22
-rw-r--r--  searx/engines/mediathekviewweb.py | 50
-rw-r--r--  searx/engines/mediawiki.py | 32
-rw-r--r--  searx/engines/microsoft_academic.py | 36
-rw-r--r--  searx/engines/mixcloud.py | 17
-rw-r--r--  searx/engines/mongodb.py | 23
-rw-r--r--  searx/engines/mysql_server.py | 11
-rw-r--r--  searx/engines/nyaa.py | 22
-rw-r--r--  searx/engines/openstreetmap.py | 81
-rw-r--r--  searx/engines/pdbe.py | 39
-rw-r--r--  searx/engines/peertube.py | 4
-rw-r--r--  searx/engines/photon.py | 82
-rw-r--r--  searx/engines/piratebay.py | 14
-rw-r--r--  searx/engines/postgresql.py | 18
-rw-r--r--  searx/engines/pubmed.py | 31
-rw-r--r--  searx/engines/qwant.py | 89
-rw-r--r--  searx/engines/recoll.py | 28
-rw-r--r--  searx/engines/reddit.py | 5
-rw-r--r--  searx/engines/redis_server.py | 32
-rw-r--r--  searx/engines/rumble.py | 22
-rw-r--r--  searx/engines/scanr_structures.py | 30
-rw-r--r--  searx/engines/searchcode_code.py | 27
-rw-r--r--  searx/engines/searx_engine.py | 2
-rw-r--r--  searx/engines/semantic_scholar.py | 40
-rw-r--r--  searx/engines/sepiasearch.py | 50
-rw-r--r--  searx/engines/seznam.py | 12
-rw-r--r--  searx/engines/sjp.py | 18
-rw-r--r--  searx/engines/solidtorrents.py | 22
-rw-r--r--  searx/engines/solr.py | 8
-rw-r--r--  searx/engines/soundcloud.py | 44
-rw-r--r--  searx/engines/spotify.py | 17
-rw-r--r--  searx/engines/springer.py | 38
-rw-r--r--  searx/engines/sqlite.py | 6
-rw-r--r--  searx/engines/stackexchange.py | 34
-rw-r--r--  searx/engines/startpage.py | 15
-rw-r--r--  searx/engines/tokyotoshokan.py | 6
-rw-r--r--  searx/engines/torznab.py | 16
-rw-r--r--  searx/engines/translated.py | 31
-rw-r--r--  searx/engines/unsplash.py | 34
-rw-r--r--  searx/engines/vimeo.py | 29
-rw-r--r--  searx/engines/wikidata.py | 243
-rw-r--r--  searx/engines/wikipedia.py | 23
-rw-r--r--  searx/engines/wolframalpha_api.py | 46
-rw-r--r--  searx/engines/wolframalpha_noapi.py | 55
-rw-r--r--  searx/engines/wordnik.py | 12
-rw-r--r--  searx/engines/www1x.py | 16
-rw-r--r--  searx/engines/xpath.py | 65
-rw-r--r--  searx/engines/yacy.py | 52
-rw-r--r--  searx/engines/yahoo.py | 83
-rw-r--r--  searx/engines/yahoo_news.py | 32
-rw-r--r--  searx/engines/youtube_api.py | 29
-rw-r--r--  searx/engines/youtube_noapi.py | 125
-rw-r--r--  searx/engines/zlibrary.py | 58
107 files changed, 1739 insertions(+), 1748 deletions(-)
diff --git a/searx/engines/1337x.py b/searx/engines/1337x.py
index e6a243596..730a4c445 100644
--- a/searx/engines/1337x.py
+++ b/searx/engines/1337x.py
@@ -43,11 +43,15 @@ def response(resp):
filesize, filesize_multiplier = filesize_info.split()
filesize = get_torrent_size(filesize, filesize_multiplier)
- results.append({'url': href,
- 'title': title,
- 'seed': seed,
- 'leech': leech,
- 'filesize': filesize,
- 'template': 'torrent.html'})
+ results.append(
+ {
+ 'url': href,
+ 'title': title,
+ 'seed': seed,
+ 'leech': leech,
+ 'filesize': filesize,
+ 'template': 'torrent.html',
+ }
+ )
return results
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 44ea9a4bd..fa9749e9d 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -57,6 +57,7 @@ engine_shortcuts = {}
"""
+
def load_engine(engine_data):
"""Load engine from ``engine_data``.
@@ -166,20 +167,19 @@ def set_language_attributes(engine):
# settings.yml
if engine.language not in engine.supported_languages:
raise ValueError(
- "settings.yml - engine: '%s' / language: '%s' not supported" % (
- engine.name, engine.language ))
+ "settings.yml - engine: '%s' / language: '%s' not supported" % (engine.name, engine.language)
+ )
if isinstance(engine.supported_languages, dict):
- engine.supported_languages = {
- engine.language : engine.supported_languages[engine.language]
- }
+ engine.supported_languages = {engine.language: engine.supported_languages[engine.language]}
else:
engine.supported_languages = [engine.language]
# find custom aliases for non standard language codes
for engine_lang in engine.supported_languages:
iso_lang = match_language(engine_lang, BABEL_LANGS, fallback=None)
- if (iso_lang
+ if (
+ iso_lang
and iso_lang != engine_lang
and not engine_lang.startswith(iso_lang)
and iso_lang not in engine.supported_languages
@@ -197,14 +197,12 @@ def set_language_attributes(engine):
}
engine.fetch_supported_languages = (
# pylint: disable=protected-access
- lambda: engine._fetch_supported_languages(
- get(engine.supported_languages_url, headers=headers))
+ lambda: engine._fetch_supported_languages(get(engine.supported_languages_url, headers=headers))
)
def update_attributes_for_tor(engine):
- if (settings['outgoing'].get('using_tor_proxy')
- and hasattr(engine, 'onion_url') ):
+ if settings['outgoing'].get('using_tor_proxy') and hasattr(engine, 'onion_url'):
engine.search_url = engine.onion_url + getattr(engine, 'search_path', '')
engine.timeout += settings['outgoing'].get('extra_proxy_timeout', 0)
@@ -217,9 +215,7 @@ def is_missing_required_attributes(engine):
missing = False
for engine_attr in dir(engine):
if not engine_attr.startswith('_') and getattr(engine, engine_attr) is None:
- logger.error(
- 'Missing engine config attribute: "{0}.{1}"'
- .format(engine.name, engine_attr))
+ logger.error('Missing engine config attribute: "{0}.{1}"'.format(engine.name, engine_attr))
missing = True
return missing
@@ -230,8 +226,7 @@ def is_engine_active(engine):
return False
# exclude onion engines if not using tor
- if ('onions' in engine.categories
- and not settings['outgoing'].get('using_tor_proxy') ):
+ if 'onions' in engine.categories and not settings['outgoing'].get('using_tor_proxy'):
return False
return True
@@ -253,8 +248,7 @@ def register_engine(engine):
def load_engines(engine_list):
- """usage: ``engine_list = settings['engines']``
- """
+ """usage: ``engine_list = settings['engines']``"""
engines.clear()
engine_shortcuts.clear()
categories.clear()
diff --git a/searx/engines/ahmia.py b/searx/engines/ahmia.py
index b9a0086bd..33e0cc393 100644
--- a/searx/engines/ahmia.py
+++ b/searx/engines/ahmia.py
@@ -25,9 +25,7 @@ page_size = 10
# search url
search_url = 'http://juhanurmihxlp77nkq76byazcldy2hlmovfu2epvl5ankdibsot4csyd.onion/search/?{query}'
time_range_support = True
-time_range_dict = {'day': 1,
- 'week': 7,
- 'month': 30}
+time_range_dict = {'day': 1, 'week': 7, 'month': 30}
# xpaths
results_xpath = '//li[@class="result"]'
@@ -54,7 +52,7 @@ def response(resp):
# trim results so there's not way too many at once
first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
all_results = eval_xpath_list(dom, results_xpath)
- trimmed_results = all_results[first_result_index:first_result_index + page_size]
+ trimmed_results = all_results[first_result_index : first_result_index + page_size]
# get results
for result in trimmed_results:
@@ -65,10 +63,7 @@ def response(resp):
title = extract_text(eval_xpath(result, title_xpath))
content = extract_text(eval_xpath(result, content_xpath))
- results.append({'url': cleaned_url,
- 'title': title,
- 'content': content,
- 'is_onion': True})
+ results.append({'url': cleaned_url, 'title': title, 'content': content, 'is_onion': True})
# get spelling corrections
for correction in eval_xpath_list(dom, correction_xpath):
diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py
index 746a8cd9c..da84bc79e 100644
--- a/searx/engines/apkmirror.py
+++ b/searx/engines/apkmirror.py
@@ -35,8 +35,8 @@ search_url = base_url + '/?post_type=app_release&searchtype=apk&page={pageno}&{q
def request(query, params):
params['url'] = search_url.format(
- pageno = params['pageno'],
- query = urlencode({'s': query}),
+ pageno=params['pageno'],
+ query=urlencode({'s': query}),
)
logger.debug("query_url --> %s", params['url'])
return params
@@ -55,11 +55,7 @@ def response(resp):
url = base_url + link.attrib.get('href') + '#downloads'
title = extract_text(link)
img_src = base_url + eval_xpath_getindex(result, './/img/@src', 0)
- res = {
- 'url': url,
- 'title': title,
- 'img_src': img_src
- }
+ res = {'url': url, 'title': title, 'img_src': img_src}
results.append(res)
diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py
index 1aa8d0ade..1cfb3983f 100644
--- a/searx/engines/archlinux.py
+++ b/searx/engines/archlinux.py
@@ -97,7 +97,7 @@ main_langs = {
'sl': 'Slovenský',
'th': 'ไทย',
'uk': 'Українська',
- 'zh': '简体中文'
+ 'zh': '简体中文',
}
supported_languages = dict(lang_urls, **main_langs)
@@ -141,7 +141,6 @@ def response(resp):
href = urljoin(base_url, link.attrib.get('href'))
title = extract_text(link)
- results.append({'url': href,
- 'title': title})
+ results.append({'url': href, 'title': title})
return results
diff --git a/searx/engines/artic.py b/searx/engines/artic.py
index 104ab8839..c0ae0a5e7 100644
--- a/searx/engines/artic.py
+++ b/searx/engines/artic.py
@@ -27,19 +27,23 @@ nb_per_page = 20
search_api = 'https://api.artic.edu/api/v1/artworks/search?'
image_api = 'https://www.artic.edu/iiif/2/'
+
def request(query, params):
- args = urlencode({
- 'q' : query,
- 'page' : params['pageno'],
- 'fields' : 'id,title,artist_display,medium_display,image_id,date_display,dimensions,artist_titles',
- 'limit' : nb_per_page,
- })
+ args = urlencode(
+ {
+ 'q': query,
+ 'page': params['pageno'],
+ 'fields': 'id,title,artist_display,medium_display,image_id,date_display,dimensions,artist_titles',
+ 'limit': nb_per_page,
+ }
+ )
params['url'] = search_api + args
logger.debug("query_url --> %s", params['url'])
return params
+
def response(resp):
results = []
@@ -50,14 +54,16 @@ def response(resp):
if not result['image_id']:
continue
- results.append({
- 'url': 'https://artic.edu/artworks/%(id)s' % result,
- 'title': result['title'] + " (%(date_display)s) // %(artist_display)s" % result,
- 'content': result['medium_display'],
- 'author': ', '.join(result['artist_titles']),
- 'img_src': image_api + '/%(image_id)s/full/843,/0/default.jpg' % result,
- 'img_format': result['dimensions'],
- 'template': 'images.html'
- })
+ results.append(
+ {
+ 'url': 'https://artic.edu/artworks/%(id)s' % result,
+ 'title': result['title'] + " (%(date_display)s) // %(artist_display)s" % result,
+ 'content': result['medium_display'],
+ 'author': ', '.join(result['artist_titles']),
+ 'img_src': image_api + '/%(image_id)s/full/843,/0/default.jpg' % result,
+ 'img_format': result['dimensions'],
+ 'template': 'images.html',
+ }
+ )
return results
diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py
index 09ea07ea5..a1a58172d 100644
--- a/searx/engines/arxiv.py
+++ b/searx/engines/arxiv.py
@@ -20,8 +20,9 @@ about = {
categories = ['science']
paging = True
-base_url = 'https://export.arxiv.org/api/query?search_query=all:'\
- + '{query}&start={offset}&max_results={number_of_results}'
+base_url = (
+ 'https://export.arxiv.org/api/query?search_query=all:' + '{query}&start={offset}&max_results={number_of_results}'
+)
# engine dependent config
number_of_results = 10
@@ -31,9 +32,7 @@ def request(query, params):
# basic search
offset = (params['pageno'] - 1) * number_of_results
- string_args = dict(query=query,
- offset=offset,
- number_of_results=number_of_results)
+ string_args = dict(query=query, offset=offset, number_of_results=number_of_results)
params['url'] = base_url.format(**string_args)
@@ -65,10 +64,7 @@ def response(resp):
publishedDate = datetime.strptime(eval_xpath_getindex(entry, './/published', 0).text, '%Y-%m-%dT%H:%M:%SZ')
- res_dict = {'url': url,
- 'title': title,
- 'publishedDate': publishedDate,
- 'content': content}
+ res_dict = {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content}
results.append(res_dict)
diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py
index 62745243f..ba951a393 100644
--- a/searx/engines/bandcamp.py
+++ b/searx/engines/bandcamp.py
@@ -44,9 +44,7 @@ def request(query, params):
pageno : 1 # number of the requested page
'''
- search_path = search_string.format(
- query=urlencode({'q': query}),
- page=params['pageno'])
+ search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])
params['url'] = base_url + search_path
diff --git a/searx/engines/base.py b/searx/engines/base.py
index 463274681..5a2d66619 100755
--- a/searx/engines/base.py
+++ b/searx/engines/base.py
@@ -21,8 +21,10 @@ about = {
categories = ['science']
-base_url = 'https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi'\
- + '?func=PerformSearch&{query}&boost=oa&hits={hits}&offset={offset}'
+base_url = (
+ 'https://api.base-search.net/cgi-bin/BaseHttpSearchInterface.fcgi'
+ + '?func=PerformSearch&{query}&boost=oa&hits={hits}&offset={offset}'
+)
# engine dependent config
paging = True
@@ -47,7 +49,7 @@ shorcut_dict = {
'source:': 'dcsource:',
'subject:': 'dcsubject:',
'title:': 'dctitle:',
- 'type:': 'dcdctype:'
+ 'type:': 'dcdctype:',
}
@@ -59,9 +61,7 @@ def request(query, params):
# basic search
offset = (params['pageno'] - 1) * number_of_results
- string_args = dict(query=urlencode({'query': query}),
- offset=offset,
- hits=number_of_results)
+ string_args = dict(query=urlencode({'query': query}), offset=offset, hits=number_of_results)
params['url'] = base_url.format(**string_args)
@@ -93,7 +93,7 @@ def response(resp):
if len(item.text) > 300:
content += "..."
-# dates returned by the BASE API are not several formats
+ # dates returned by the BASE API are not several formats
publishedDate = None
for date_format in ['%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%d', '%Y-%m', '%Y']:
try:
@@ -103,14 +103,9 @@ def response(resp):
pass
if publishedDate is not None:
- res_dict = {'url': url,
- 'title': title,
- 'publishedDate': publishedDate,
- 'content': content}
+ res_dict = {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content}
else:
- res_dict = {'url': url,
- 'title': title,
- 'content': content}
+ res_dict = {'url': url, 'title': title, 'content': content}
results.append(res_dict)
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 3917e54c1..59fc22be4 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -36,9 +36,11 @@ inital_query = 'search?{query}&search=&form=QBLH'
# following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE
page_query = 'search?{query}&search=&first={offset}&FORM=PERE'
+
def _get_offset_from_pageno(pageno):
return (pageno - 1) * 10 + 1
+
def request(query, params):
offset = _get_offset_from_pageno(params.get('pageno', 1))
@@ -53,30 +55,23 @@ def request(query, params):
if params['language'] == 'all':
lang = 'EN'
else:
- lang = match_language(
- params['language'], supported_languages, language_aliases
- )
+ lang = match_language(params['language'], supported_languages, language_aliases)
- query = 'language:{} {}'.format(
- lang.split('-')[0].upper(), query
- )
+ query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)
- search_path = search_string.format(
- query = urlencode({'q': query}),
- offset = offset)
+ search_path = search_string.format(query=urlencode({'q': query}), offset=offset)
if offset > 1:
- referer = base_url + inital_query.format(query = urlencode({'q': query}))
+ referer = base_url + inital_query.format(query=urlencode({'q': query}))
params['headers']['Referer'] = referer
- logger.debug("headers.Referer --> %s", referer )
+ logger.debug("headers.Referer --> %s", referer)
params['url'] = base_url + search_path
params['headers']['Accept-Language'] = "en-US,en;q=0.5"
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
return params
+
def response(resp):
results = []
@@ -87,7 +82,7 @@ def response(resp):
for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
# IMO //div[@class="sa_cc"] does no longer match
- logger.debug('found //div[@class="sa_cc"] --> %s', result)
+ logger.debug('found //div[@class="sa_cc"] --> %s', result)
link = eval_xpath(result, './/h3/a')[0]
url = link.attrib.get('href')
@@ -95,11 +90,7 @@ def response(resp):
content = extract_text(eval_xpath(result, './/p'))
# append result
- results.append({
- 'url': url,
- 'title': title,
- 'content': content
- })
+ results.append({'url': url, 'title': title, 'content': content})
# parse results again if nothing is found yet
for result in eval_xpath(dom, '//li[@class="b_algo"]'):
@@ -110,18 +101,14 @@ def response(resp):
content = extract_text(eval_xpath(result, './/p'))
# append result
- results.append({
- 'url': url,
- 'title': title,
- 'content': content
- })
+ results.append({'url': url, 'title': title, 'content': content})
try:
result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
- result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
+ result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :]
result_len_container = re.sub('[^0-9]', '', result_len_container)
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py
index 4bee9bc7d..246d37a30 100644
--- a/searx/engines/bing_images.py
+++ b/searx/engines/bing_images.py
@@ -9,7 +9,10 @@ from json import loads
from searx.utils import match_language
from searx.engines.bing import language_aliases
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
+from searx.engines.bing import (
+ _fetch_supported_languages,
+ supported_languages_url,
+) # NOQA # pylint: disable=unused-import
# about
about = {
@@ -31,39 +34,25 @@ number_of_results = 28
# search-url
base_url = 'https://www.bing.com/'
-search_string = 'images/search'\
- '?{query}'\
- '&count={count}'\
- '&first={first}'\
- '&tsc=ImageHoverTitle'
+search_string = 'images/search' '?{query}' '&count={count}' '&first={first}' '&tsc=ImageHoverTitle'
time_range_string = '&qft=+filterui:age-lt{interval}'
-time_range_dict = {'day': '1440',
- 'week': '10080',
- 'month': '43200',
- 'year': '525600'}
+time_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'}
# safesearch definitions
-safesearch_types = {2: 'STRICT',
- 1: 'DEMOTE',
- 0: 'OFF'}
+safesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'}
# do search-request
def request(query, params):
offset = ((params['pageno'] - 1) * number_of_results) + 1
- search_path = search_string.format(
- query=urlencode({'q': query}),
- count=number_of_results,
- first=offset)
+ search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset)
language = match_language(params['language'], supported_languages, language_aliases).lower()
- params['cookies']['SRCHHPGUSR'] = \
- 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
+ params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
- params['cookies']['_EDGE_S'] = 'mkt=' + language +\
- '&ui=' + language + '&F=1'
+ params['cookies']['_EDGE_S'] = 'mkt=' + language + '&ui=' + language + '&F=1'
params['url'] = base_url + search_path
if params['time_range'] in time_range_dict:
@@ -92,14 +81,18 @@ def response(resp):
# strip 'Unicode private use area' highlighting, they render to Tux
# the Linux penguin and a standing diamond on my machine...
title = m.get('t', '').replace('\ue000', '').replace('\ue001', '')
- results.append({'template': 'images.html',
- 'url': m['purl'],
- 'thumbnail_src': m['turl'],
- 'img_src': m['murl'],
- 'content': '',
- 'title': title,
- 'source': source,
- 'img_format': img_format})
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': m['purl'],
+ 'thumbnail_src': m['turl'],
+ 'img_src': m['murl'],
+ 'content': '',
+ 'title': title,
+ 'source': source,
+ 'img_format': img_format,
+ }
+ )
except:
continue
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index f0bc8bead..22856541b 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -13,10 +13,7 @@ from datetime import datetime
from dateutil import parser
from lxml import etree
from lxml.etree import XPath
-from searx.utils import (
- match_language,
- eval_xpath_getindex
-)
+from searx.utils import match_language, eval_xpath_getindex
from searx.engines.bing import ( # pylint: disable=unused-import
language_aliases,
_fetch_supported_languages,
@@ -42,11 +39,8 @@ time_range_support = True
base_url = 'https://www.bing.com/'
search_string = 'news/search?{query}&first={offset}&format=RSS'
search_string_with_time = 'news/search?{query}&first={offset}&qft=interval%3d"{interval}"&format=RSS'
-time_range_dict = {
- 'day': '7',
- 'week': '8',
- 'month': '9'
-}
+time_range_dict = {'day': '7', 'week': '8', 'month': '9'}
+
def url_cleanup(url_string):
"""remove click"""
@@ -57,6 +51,7 @@ def url_cleanup(url_string):
url_string = query.get('url', None)
return url_string
+
def image_url_cleanup(url_string):
"""replace the http://*bing.com/th?id=... by https://www.bing.com/th?id=..."""
@@ -66,6 +61,7 @@ def image_url_cleanup(url_string):
url_string = "https://www.bing.com/th?id=" + quote(query.get('id'))
return url_string
+
def _get_url(query, language, offset, time_range):
if time_range in time_range_dict:
search_path = search_string_with_time.format(
@@ -91,6 +87,7 @@ def _get_url(query, language, offset, time_range):
)
return base_url + search_path
+
def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict:
@@ -105,6 +102,7 @@ def request(query, params):
return params
+
def response(resp):
results = []
@@ -127,26 +125,16 @@ def response(resp):
publishedDate = datetime.now()
# thumbnail
- thumbnail = eval_xpath_getindex(
- item, XPath('./News:Image/text()', namespaces=namespaces), 0, default=None)
+ thumbnail = eval_xpath_getindex(item, XPath('./News:Image/text()', namespaces=namespaces), 0, default=None)
if thumbnail is not None:
thumbnail = image_url_cleanup(thumbnail)
# append result
if thumbnail is not None:
- results.append({
- 'url': url,
- 'title': title,
- 'publishedDate': publishedDate,
- 'content': content,
- 'img_src': thumbnail
- })
+ results.append(
+ {'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content, 'img_src': thumbnail}
+ )
else:
- results.append({
- 'url': url,
- 'title': title,
- 'publishedDate': publishedDate,
- 'content': content
- })
+ results.append({'url': url, 'title': title, 'publishedDate': publishedDate, 'content': content})
return results
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
index 2e1f13de2..ad61724a1 100644
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -9,7 +9,10 @@ from urllib.parse import urlencode
from searx.utils import match_language
from searx.engines.bing import language_aliases
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
+from searx.engines.bing import (
+ _fetch_supported_languages,
+ supported_languages_url,
+) # NOQA # pylint: disable=unused-import
# about
about = {
@@ -28,36 +31,22 @@ time_range_support = True
number_of_results = 28
base_url = 'https://www.bing.com/'
-search_string = 'videos/search'\
- '?{query}'\
- '&count={count}'\
- '&first={first}'\
- '&scope=video'\
- '&FORM=QBLH'
+search_string = 'videos/search' '?{query}' '&count={count}' '&first={first}' '&scope=video' '&FORM=QBLH'
time_range_string = '&qft=+filterui:videoage-lt{interval}'
-time_range_dict = {'day': '1440',
- 'week': '10080',
- 'month': '43200',
- 'year': '525600'}
+time_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'}
# safesearch definitions
-safesearch_types = {2: 'STRICT',
- 1: 'DEMOTE',
- 0: 'OFF'}
+safesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'}
# do search-request
def request(query, params):
offset = ((params['pageno'] - 1) * number_of_results) + 1
- search_path = search_string.format(
- query=urlencode({'q': query}),
- count=number_of_results,
- first=offset)
+ search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset)
# safesearch cookie
- params['cookies']['SRCHHPGUSR'] = \
- 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
+ params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
# language cookie
language = match_language(params['language'], supported_languages, language_aliases).lower()
@@ -89,11 +78,15 @@ def response(resp):
info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip()
content = '{0} - {1}'.format(metadata['du'], info)
thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid'])
- results.append({'url': metadata['murl'],
- 'thumbnail': thumbnail,
- 'title': metadata.get('vt', ''),
- 'content': content,
- 'template': 'videos.html'})
+ results.append(
+ {
+ 'url': metadata['murl'],
+ 'thumbnail': thumbnail,
+ 'title': metadata.get('vt', ''),
+ 'content': content,
+ 'template': 'videos.html',
+ }
+ )
except:
continue
diff --git a/searx/engines/btdigg.py b/searx/engines/btdigg.py
index cda9e9355..c5dd92105 100644
--- a/searx/engines/btdigg.py
+++ b/searx/engines/btdigg.py
@@ -11,10 +11,7 @@ from searx.utils import extract_text, get_torrent_size
about = {
"website": 'https://btdig.com',
"wikidata_id": 'Q4836698',
- "official_api_documentation": {
- 'url': 'https://btdig.com/contacts',
- 'comment': 'on demand'
- },
+ "official_api_documentation": {'url': 'https://btdig.com/contacts', 'comment': 'on demand'},
"use_official_api": False,
"require_api_key": False,
"results": 'HTML',
@@ -31,8 +28,7 @@ search_url = url + '/search?q={search_term}&p={pageno}'
# do search-request
def request(query, params):
- params['url'] = search_url.format(search_term=quote(query),
- pageno=params['pageno'] - 1)
+ params['url'] = search_url.format(search_term=quote(query), pageno=params['pageno'] - 1)
return params
@@ -77,13 +73,17 @@ def response(resp):
magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']
# append result
- results.append({'url': href,
- 'title': title,
- 'content': content,
- 'filesize': filesize,
- 'files': files,
- 'magnetlink': magnetlink,
- 'template': 'torrent.html'})
+ results.append(
+ {
+ 'url': href,
+ 'title': title,
+ 'content': content,
+ 'filesize': filesize,
+ 'files': files,
+ 'magnetlink': magnetlink,
+ 'template': 'torrent.html',
+ }
+ )
# return results sorted by seeder
return results
diff --git a/searx/engines/ccengine.py b/searx/engines/ccengine.py
index 6f3a5adb7..93ac30c86 100644
--- a/searx/engines/ccengine.py
+++ b/searx/engines/ccengine.py
@@ -29,10 +29,7 @@ search_string = '&page={page}&page_size={nb_per_page}&format=json&{query}'
def request(query, params):
- search_path = search_string.format(
- query=urlencode({'q': query}),
- nb_per_page=nb_per_page,
- page=params['pageno'])
+ search_path = search_string.format(query=urlencode({'q': query}), nb_per_page=nb_per_page, page=params['pageno'])
params['url'] = base_url + search_path
@@ -45,9 +42,13 @@ def response(resp):
json_data = loads(resp.text)
for result in json_data['results']:
- results.append({'url': result['foreign_landing_url'],
- 'title': result['title'],
- 'img_src': result['url'],
- 'template': 'images.html'})
+ results.append(
+ {
+ 'url': result['foreign_landing_url'],
+ 'title': result['title'],
+ 'img_src': result['url'],
+ 'template': 'images.html',
+ }
+ )
return results
diff --git a/searx/engines/command.py b/searx/engines/command.py
index aca379c67..abd29e2a5 100644
--- a/searx/engines/command.py
+++ b/searx/engines/command.py
@@ -138,7 +138,7 @@ def __check_query_params(params):
def check_parsing_options(engine_settings):
- """ Checks if delimiter based parsing or regex parsing is configured correctly """
+ """Checks if delimiter based parsing or regex parsing is configured correctly"""
if 'delimiter' not in engine_settings and 'parse_regex' not in engine_settings:
raise ValueError('failed to init settings for parsing lines: missing delimiter or parse_regex')
@@ -151,7 +151,7 @@ def check_parsing_options(engine_settings):
def __parse_single_result(raw_result):
- """ Parses command line output based on configuration """
+ """Parses command line output based on configuration"""
result = {}
@@ -167,6 +167,6 @@ def __parse_single_result(raw_result):
found = regex.search(raw_result)
if not found:
return {}
- result[result_key] = raw_result[found.start():found.end()]
+ result[result_key] = raw_result[found.start() : found.end()]
return result
diff --git a/searx/engines/core.py b/searx/engines/core.py
index e83c8bbe9..1fcb68f1f 100644
--- a/searx/engines/core.py
+++ b/searx/engines/core.py
@@ -28,22 +28,24 @@ api_key = 'unset'
base_url = 'https://core.ac.uk:443/api-v2/search/'
search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'
+
def request(query, params):
if api_key == 'unset':
raise SearxEngineAPIException('missing CORE API key')
search_path = search_string.format(
- query = urlencode({'q': query}),
- nb_per_page = nb_per_page,
- page = params['pageno'],
- apikey = api_key,
+ query=urlencode({'q': query}),
+ nb_per_page=nb_per_page,
+ page=params['pageno'],
+ apikey=api_key,
)
params['url'] = base_url + search_path
logger.debug("query_url --> %s", params['url'])
return params
+
def response(resp):
results = []
json_data = loads(resp.text)
@@ -52,7 +54,7 @@ def response(resp):
source = result['_source']
time = source['publishedDate'] or source['depositedDate']
- if time :
+ if time:
date = datetime.fromtimestamp(time / 1000)
else:
date = None
@@ -66,12 +68,14 @@ def response(resp):
metadata.append(source['doi'])
metadata = ' / '.join(metadata)
- results.append({
- 'url': source['urls'][0].replace('http://', 'https://', 1),
- 'title': source['title'],
- 'content': source['description'],
- 'publishedDate': date,
- 'metadata' : metadata,
- })
+ results.append(
+ {
+ 'url': source['urls'][0].replace('http://', 'https://', 1),
+ 'title': source['title'],
+ 'content': source['description'],
+ 'publishedDate': date,
+ 'metadata': metadata,
+ }
+ )
return results
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index d4c3b5f81..969688126 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -30,7 +30,7 @@ def request(query, params):
def response(resp):
"""remove first and last lines to get only json"""
- json_resp = resp.text[resp.text.find('\n') + 1:resp.text.rfind('\n') - 2]
+ json_resp = resp.text[resp.text.find('\n') + 1 : resp.text.rfind('\n') - 2]
results = []
try:
conversion_rate = float(json.loads(json_resp)['conversion']['converted-amount'])
@@ -47,7 +47,8 @@ def response(resp):
)
url = 'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'.format(
- resp.search_params['from'].upper(), resp.search_params['to'])
+ resp.search_params['from'].upper(), resp.search_params['to']
+ )
results.append({'answer': answer, 'url': url})
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index 92d368c11..5607691a4 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -25,8 +25,10 @@ paging = True
# search-url
# see http://www.dailymotion.com/doc/api/obj-video.html
search_url = 'https://api.dailymotion.com/videos?fields=created_time,title,description,duration,url,thumbnail_360_url,id&sort=relevance&limit=5&page={pageno}&{query}' # noqa
-embedded_url = '<iframe frameborder="0" width="540" height="304" ' +\
- 'data-src="https://www.dailymotion.com/embed/video/{videoid}" allowfullscreen></iframe>'
+embedded_url = (
+ '<iframe frameborder="0" width="540" height="304" '
+ + 'data-src="https://www.dailymotion.com/embed/video/{videoid}" allowfullscreen></iframe>'
+)
supported_languages_url = 'https://api.dailymotion.com/languages'
@@ -39,8 +41,8 @@ def request(query, params):
locale = match_language(params['language'], supported_languages)
params['url'] = search_url.format(
- query=urlencode({'search': query, 'localization': locale}),
- pageno=params['pageno'])
+ query=urlencode({'search': query, 'localization': locale}), pageno=params['pageno']
+ )
return params
@@ -67,13 +69,17 @@ def response(resp):
# http to https
thumbnail = thumbnail.replace("http://", "https://")
- results.append({'template': 'videos.html',
- 'url': url,
- 'title': title,
- 'content': content,
- 'publishedDate': publishedDate,
- 'embedded': embedded,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'template': 'videos.html',
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
+ 'thumbnail': thumbnail,
+ }
+ )
# return results
return results
diff --git a/searx/engines/deezer.py b/searx/engines/deezer.py
index 946bd3ebe..220ac599d 100644
--- a/searx/engines/deezer.py
+++ b/searx/engines/deezer.py
@@ -24,9 +24,11 @@ paging = True
url = 'https://api.deezer.com/'
search_url = url + 'search?{query}&index={offset}'
-embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
- 'data-src="https://www.deezer.com/plugins/player?type=tracks&id={audioid}" ' +\
- 'width="540" height="80"></iframe>'
+embedded_url = (
+ '<iframe scrolling="no" frameborder="0" allowTransparency="true" '
+ + 'data-src="https://www.deezer.com/plugins/player?type=tracks&id={audioid}" '
+ + 'width="540" height="80"></iframe>'
+)
# do search-request
@@ -53,18 +55,12 @@ def response(resp):
if url.startswith('http://'):
url = 'https' + url[4:]
- content = '{} - {} - {}'.format(
- result['artist']['name'],
- result['album']['title'],
- result['title'])
+ content = '{} - {} - {}'.format(result['artist']['name'], result['album']['title'], result['title'])
embedded = embedded_url.format(audioid=result['id'])
# append result
- results.append({'url': url,
- 'title': title,
- 'embedded': embedded,
- 'content': content})
+ results.append({'url': url, 'title': title, 'embedded': embedded, 'content': content})
# return results
return results
diff --git a/searx/engines/demo_offline.py b/searx/engines/demo_offline.py
index a4a632180..aeb74f443 100644
--- a/searx/engines/demo_offline.py
+++ b/searx/engines/demo_offline.py
@@ -31,6 +31,7 @@ about = {
# if there is a need for globals, use a leading underline
_my_offline_engine = None
+
def init(engine_settings=None):
"""Initialization of the (offline) engine. The origin of this demo engine is a
simple json string which is loaded in this example while the engine is
@@ -44,11 +45,10 @@ def init(engine_settings=None):
', {"value":"first item"}'
', {"value":"second item"}'
', {"value":"third item"}'
- ']'
-
- % engine_settings.get('name')
+ ']' % engine_settings.get('name')
)
+
def search(query, request_params):
"""Query (offline) engine and return results. Assemble the list of results from
your local engine. In this demo engine we ignore the 'query' term, usual
@@ -62,11 +62,11 @@ def search(query, request_params):
for row in result_list:
entry = {
- 'query' : query,
- 'language' : request_params['language'],
- 'value' : row.get("value"),
+ 'query': query,
+ 'language': request_params['language'],
+ 'value': row.get("value"),
# choose a result template or comment out to use the *default*
- 'template' : 'key-value.html',
+ 'template': 'key-value.html',
}
ret_val.append(entry)
diff --git a/searx/engines/demo_online.py b/searx/engines/demo_online.py
index a0f736e42..e53b3c15e 100644
--- a/searx/engines/demo_online.py
+++ b/searx/engines/demo_online.py
@@ -43,6 +43,7 @@ about = {
# if there is a need for globals, use a leading underline
_my_online_engine = None
+
def init(engine_settings):
"""Initialization of the (online) engine. If no initialization is needed, drop
this init function.
@@ -51,20 +52,24 @@ def init(engine_settings):
global _my_online_engine # pylint: disable=global-statement
_my_online_engine = engine_settings.get('name')
+
def request(query, params):
"""Build up the ``params`` for the online request. In this example we build a
URL to fetch images from `artic.edu <https://artic.edu>`__
"""
- args = urlencode({
- 'q' : query,
- 'page' : params['pageno'],
- 'fields' : 'id,title,artist_display,medium_display,image_id,date_display,dimensions,artist_titles',
- 'limit' : page_size,
- })
+ args = urlencode(
+ {
+ 'q': query,
+ 'page': params['pageno'],
+ 'fields': 'id,title,artist_display,medium_display,image_id,date_display,dimensions,artist_titles',
+ 'limit': page_size,
+ }
+ )
params['url'] = search_api + args
return params
+
def response(resp):
"""Parse out the result items from the response. In this example we parse the
response from `api.artic.edu <https://artic.edu>`__ and filter out all
@@ -79,14 +84,16 @@ def response(resp):
if not result['image_id']:
continue
- results.append({
- 'url': 'https://artic.edu/artworks/%(id)s' % result,
- 'title': result['title'] + " (%(date_display)s) // %(artist_display)s" % result,
- 'content': result['medium_display'],
- 'author': ', '.join(result['artist_titles']),
- 'img_src': image_api + '/%(image_id)s/full/843,/0/default.jpg' % result,
- 'img_format': result['dimensions'],
- 'template': 'images.html'
- })
+ results.append(
+ {
+ 'url': 'https://artic.edu/artworks/%(id)s' % result,
+ 'title': result['title'] + " (%(date_display)s) // %(artist_display)s" % result,
+ 'content': result['medium_display'],
+ 'author': ', '.join(result['artist_titles']),
+ 'img_src': image_api + '/%(image_id)s/full/843,/0/default.jpg' % result,
+ 'img_format': result['dimensions'],
+ 'template': 'images.html',
+ }
+ )
return results
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index b13d54dd5..e44ac28e5 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -32,13 +32,14 @@ time_range_dict = {
# search-url
base_url = 'https://www.deviantart.com'
+
def request(query, params):
# https://www.deviantart.com/search/deviations?page=5&q=foo
- query = {
- 'page' : params['pageno'],
- 'q' : query,
+ query = {
+ 'page': params['pageno'],
+ 'q': query,
}
if params['time_range'] in time_range_dict:
query['order'] = time_range_dict[params['time_range']]
@@ -47,6 +48,7 @@ def request(query, params):
return params
+
def response(resp):
results = []
@@ -67,11 +69,13 @@ def response(resp):
continue
img_tag = img_tag[0]
- results.append({
- 'template': 'images.html',
- 'url': a_tag.attrib.get('href'),
- 'img_src': img_tag.attrib.get('src'),
- 'title': img_tag.attrib.get('alt'),
- })
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': a_tag.attrib.get('href'),
+ 'img_src': img_tag.attrib.get('src'),
+ 'title': img_tag.attrib.get('alt'),
+ }
+ )
return results
diff --git a/searx/engines/dictzone.py b/searx/engines/dictzone.py
index 4a92a22c3..126e75374 100644
--- a/searx/engines/dictzone.py
+++ b/searx/engines/dictzone.py
@@ -27,9 +27,7 @@ https_support = True
def request(query, params):
- params['url'] = url.format(from_lang=params['from_lang'][2],
- to_lang=params['to_lang'][2],
- query=params['query'])
+ params['url'] = url.format(from_lang=params['from_lang'][2], to_lang=params['to_lang'][2], query=params['query'])
return params
@@ -51,10 +49,12 @@ def response(resp):
if t.strip():
to_results.append(to_result.text_content())
- results.append({
- 'url': urljoin(str(resp.url), '?%d' % k),
- 'title': from_result.text_content(),
- 'content': '; '.join(to_results)
- })
+ results.append(
+ {
+ 'url': urljoin(str(resp.url), '?%d' % k),
+ 'title': from_result.text_content(),
+ 'content': '; '.join(to_results),
+ }
+ )
return results
diff --git a/searx/engines/digbt.py b/searx/engines/digbt.py
index 109662a49..2914e9228 100644
--- a/searx/engines/digbt.py
+++ b/searx/engines/digbt.py
@@ -48,13 +48,17 @@ def response(resp):
filesize = get_torrent_size(files_data[FILESIZE], files_data[FILESIZE_MULTIPLIER])
magnetlink = result.xpath('.//div[@class="tail"]//a[@class="title"]/@href')[0]
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'filesize': filesize,
- 'magnetlink': magnetlink,
- 'seed': 'N/A',
- 'leech': 'N/A',
- 'template': 'torrent.html'})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'filesize': filesize,
+ 'magnetlink': magnetlink,
+ 'seed': 'N/A',
+ 'leech': 'N/A',
+ 'template': 'torrent.html',
+ }
+ )
return results
diff --git a/searx/engines/docker_hub.py b/searx/engines/docker_hub.py
index e69f677b3..1e492b196 100644
--- a/searx/engines/docker_hub.py
+++ b/searx/engines/docker_hub.py
@@ -9,13 +9,13 @@ from urllib.parse import urlencode
from dateutil import parser
about = {
- "website": 'https://hub.docker.com',
- "wikidata_id": 'Q100769064',
- "official_api_documentation": 'https://docs.docker.com/registry/spec/api/',
- "use_official_api": True,
- "require_api_key": False,
- "results": 'JSON',
- }
+ "website": 'https://hub.docker.com',
+ "wikidata_id": 'Q100769064',
+ "official_api_documentation": 'https://docs.docker.com/registry/spec/api/',
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
categories = ['it'] # optional
paging = True
@@ -23,6 +23,7 @@ paging = True
base_url = "https://hub.docker.com/"
search_url = base_url + "api/content/v1/products/search?{query}&type=image&page_size=25"
+
def request(query, params):
params['url'] = search_url.format(query=urlencode(dict(q=query, page=params["pageno"])))
@@ -30,6 +31,7 @@ def request(query, params):
return params
+
def response(resp):
'''post-response callback
resp: requests response object
@@ -53,12 +55,8 @@ def response(resp):
result["url"] = base_url + "r/" + item.get('slug', "")
result["title"] = item.get("name")
result["content"] = item.get("short_description")
- result["publishedDate"] = parser.parse(
- item.get("updated_at") or item.get("created_at")
- )
- result["thumbnail"] = (
- item["logo_url"].get("large") or item["logo_url"].get("small")
- )
+ result["publishedDate"] = parser.parse(item.get("updated_at") or item.get("created_at"))
+ result["thumbnail"] = item["logo_url"].get("large") or item["logo_url"].get("small")
results.append(result)
return results
diff --git a/searx/engines/doku.py b/searx/engines/doku.py
index cf38b3b9a..e81131cce 100644
--- a/searx/engines/doku.py
+++ b/searx/engines/doku.py
@@ -25,8 +25,7 @@ number_of_results = 5
# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
-search_url = '/?do=search'\
- '&{query}'
+search_url = '/?do=search' '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\
@@ -34,8 +33,7 @@ search_url = '/?do=search'\
# do search-request
def request(query, params):
- params['url'] = base_url +\
- search_url.format(query=urlencode({'id': query}))
+ params['url'] = base_url + search_url.format(query=urlencode({'id': query}))
return params
@@ -60,9 +58,7 @@ def response(resp):
title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
# append result
- results.append({'title': title,
- 'content': "",
- 'url': base_url + res_url})
+ results.append({'title': title, 'content': "", 'url': base_url + res_url})
# Search results
for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
@@ -74,9 +70,7 @@ def response(resp):
content = extract_text(eval_xpath(r, '.'))
# append result
- results.append({'title': title,
- 'content': content,
- 'url': base_url + res_url})
+ results.append({'title': title, 'content': content, 'url': base_url + res_url})
except:
continue
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index d283af81d..0d2a524df 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -39,15 +39,10 @@ language_aliases = {
'ko': 'kr-KR',
'sl-SI': 'sl-SL',
'zh-TW': 'tzh-TW',
- 'zh-HK': 'tzh-HK'
+ 'zh-HK': 'tzh-HK',
}
-time_range_dict = {
- 'day': 'd',
- 'week': 'w',
- 'month': 'm',
- 'year': 'y'
-}
+time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
# search-url
url = 'https://lite.duckduckgo.com/lite'
@@ -118,6 +113,7 @@ def request(query, params):
logger.debug("param cookies: %s", params['cookies'])
return params
+
# get response from search-request
def response(resp):
@@ -163,21 +159,24 @@ def response(resp):
if td_content is None:
continue
- results.append({
- 'title': a_tag.text_content(),
- 'content': extract_text(td_content),
- 'url': a_tag.get('href'),
- })
+ results.append(
+ {
+ 'title': a_tag.text_content(),
+ 'content': extract_text(td_content),
+ 'url': a_tag.get('href'),
+ }
+ )
return results
+
# get supported languages from their site
def _fetch_supported_languages(resp):
# response is a js file with regions as an embedded object
response_page = resp.text
- response_page = response_page[response_page.find('regions:{') + 8:]
- response_page = response_page[:response_page.find('}') + 1]
+ response_page = response_page[response_page.find('regions:{') + 8 :]
+ response_page = response_page[: response_page.find('}') + 1]
regions_json = loads(response_page)
supported_languages = map((lambda x: x[3:] + '-' + x[:2].upper()), regions_json.keys())
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 3ef043964..d4e813c2b 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -10,7 +10,10 @@ from lxml import html
from searx.data import WIKIDATA_UNITS
from searx.engines.duckduckgo import language_aliases
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
+from searx.engines.duckduckgo import (
+ _fetch_supported_languages,
+ supported_languages_url,
+) # NOQA # pylint: disable=unused-import
from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
@@ -24,19 +27,15 @@ about = {
"results": 'JSON',
}
-URL = 'https://api.duckduckgo.com/'\
- + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
+URL = 'https://api.duckduckgo.com/' + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
-WIKIDATA_PREFIX = [
- 'http://www.wikidata.org/entity/',
- 'https://www.wikidata.org/entity/'
-]
+WIKIDATA_PREFIX = ['http://www.wikidata.org/entity/', 'https://www.wikidata.org/entity/']
replace_http_by_https = get_string_replaces_function({'http:': 'https:'})
def is_broken_text(text):
- """ duckduckgo may return something like "<a href="xxxx">http://somewhere Related website<a/>"
+ """duckduckgo may return something like "<a href="xxxx">http://somewhere Related website<a/>"
The href URL is broken, the "Related website" may contains some HTML.
@@ -61,11 +60,7 @@ def result_to_text(text, htmlResult):
def request(query, params):
params['url'] = URL.format(query=urlencode({'q': query}))
- language = match_language(
- params['language'],
- supported_languages,
- language_aliases
- )
+ language = match_language(params['language'], supported_languages, language_aliases)
language = language.split('-')[0]
params['headers']['Accept-Language'] = language
return params
@@ -127,23 +122,14 @@ def response(resp):
firstURL = ddg_result.get('FirstURL')
text = ddg_result.get('Text')
if not is_broken_text(text):
- suggestion = result_to_text(
- text,
- ddg_result.get('Result')
- )
+ suggestion = result_to_text(text, ddg_result.get('Result'))
if suggestion != heading and suggestion is not None:
results.append({'suggestion': suggestion})
elif 'Topics' in ddg_result:
suggestions = []
- relatedTopics.append({
- 'name': ddg_result.get('Name', ''),
- 'suggestions': suggestions
- })
+ relatedTopics.append({'name': ddg_result.get('Name', ''), 'suggestions': suggestions})
for topic_result in ddg_result.get('Topics', []):
- suggestion = result_to_text(
- topic_result.get('Text'),
- topic_result.get('Result')
- )
+ suggestion = result_to_text(topic_result.get('Text'), topic_result.get('Result'))
if suggestion != heading and suggestion is not None:
suggestions.append(suggestion)
@@ -152,25 +138,15 @@ def response(resp):
if abstractURL != '':
# add as result ? problem always in english
infobox_id = abstractURL
- urls.append({
- 'title': search_res.get('AbstractSource'),
- 'url': abstractURL,
- 'official': True
- })
- results.append({
- 'url': abstractURL,
- 'title': heading
- })
+ urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL, 'official': True})
+ results.append({'url': abstractURL, 'title': heading})
# definition
definitionURL = search_res.get('DefinitionURL', '')
if definitionURL != '':
# add as result ? as answer ? problem always in english
infobox_id = definitionURL
- urls.append({
- 'title': search_res.get('DefinitionSource'),
- 'url': definitionURL
- })
+ urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
# to merge with wikidata's infobox
if infobox_id:
@@ -198,10 +174,7 @@ def response(resp):
# * netflix_id
external_url = get_external_url(data_type, data_value)
if external_url is not None:
- urls.append({
- 'title': data_label,
- 'url': external_url
- })
+ urls.append({'title': data_label, 'url': external_url})
elif data_type in ['instance', 'wiki_maps_trigger', 'google_play_artist_id']:
# ignore instance: Wikidata value from "Instance Of" (Qxxxx)
# ignore wiki_maps_trigger: reference to a javascript
@@ -211,11 +184,7 @@ def response(resp):
# There is already an URL for the website
pass
elif data_type == 'area':
- attributes.append({
- 'label': data_label,
- 'value': area_to_str(data_value),
- 'entity': 'P2046'
- })
+ attributes.append({'label': data_label, 'value': area_to_str(data_value), 'entity': 'P2046'})
osm_zoom = area_to_osm_zoom(data_value.get('amount'))
elif data_type == 'coordinates':
if data_value.get('globe') == 'http://www.wikidata.org/entity/Q2':
@@ -224,16 +193,9 @@ def response(resp):
coordinates = info
else:
# coordinate NOT on Earth
- attributes.append({
- 'label': data_label,
- 'value': data_value,
- 'entity': 'P625'
- })
+ attributes.append({'label': data_label, 'value': data_value, 'entity': 'P625'})
elif data_type == 'string':
- attributes.append({
- 'label': data_label,
- 'value': data_value
- })
+ attributes.append({'label': data_label, 'value': data_value})
if coordinates:
data_label = coordinates.get('label')
@@ -241,31 +203,24 @@ def response(resp):
latitude = data_value.get('latitude')
longitude = data_value.get('longitude')
url = get_earth_coordinates_url(latitude, longitude, osm_zoom)
- urls.append({
- 'title': 'OpenStreetMap',
- 'url': url,
- 'entity': 'P625'
- })
+ urls.append({'title': 'OpenStreetMap', 'url': url, 'entity': 'P625'})
if len(heading) > 0:
# TODO get infobox.meta.value where .label='article_title' # pylint: disable=fixme
- if image is None and len(attributes) == 0 and len(urls) == 1 and\
- len(relatedTopics) == 0 and len(content) == 0:
- results.append({
- 'url': urls[0]['url'],
- 'title': heading,
- 'content': content
- })
+ if image is None and len(attributes) == 0 and len(urls) == 1 and len(relatedTopics) == 0 and len(content) == 0:
+ results.append({'url': urls[0]['url'], 'title': heading, 'content': content})
else:
- results.append({
- 'infobox': heading,
- 'id': infobox_id,
- 'content': content,
- 'img_src': image,
- 'attributes': attributes,
- 'urls': urls,
- 'relatedTopics': relatedTopics
- })
+ results.append(
+ {
+ 'infobox': heading,
+ 'id': infobox_id,
+ 'content': content,
+ 'img_src': image,
+ 'attributes': attributes,
+ 'urls': urls,
+ 'relatedTopics': relatedTopics,
+ }
+ )
return results
@@ -273,7 +228,7 @@ def response(resp):
def unit_to_str(unit):
for prefix in WIKIDATA_PREFIX:
if unit.startswith(prefix):
- wikidata_entity = unit[len(prefix):]
+ wikidata_entity = unit[len(prefix) :]
return WIKIDATA_UNITS.get(wikidata_entity, unit)
return unit
diff --git a/searx/engines/duckduckgo_images.py b/searx/engines/duckduckgo_images.py
index 0daaf41e9..21e24d2c9 100644
--- a/searx/engines/duckduckgo_images.py
+++ b/searx/engines/duckduckgo_images.py
@@ -7,7 +7,10 @@ from json import loads
from urllib.parse import urlencode
from searx.exceptions import SearxEngineAPIException
from searx.engines.duckduckgo import get_region_code
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
+from searx.engines.duckduckgo import (
+ _fetch_supported_languages,
+ supported_languages_url,
+) # NOQA # pylint: disable=unused-import
from searx.network import get
# about
@@ -41,8 +44,8 @@ def get_vqd(query, headers):
content = res.text
if content.find('vqd=\'') == -1:
raise SearxEngineAPIException('Request failed')
- vqd = content[content.find('vqd=\'') + 5:]
- vqd = vqd[:vqd.find('\'')]
+ vqd = content[content.find('vqd=\'') + 5 :]
+ vqd = vqd[: vqd.find('\'')]
return vqd
@@ -61,10 +64,10 @@ def request(query, params):
region_code = get_region_code(params['language'], lang_list=supported_languages)
if region_code:
params['url'] = images_url.format(
- query=urlencode({'q': query, 'l': region_code}), offset=offset, safesearch=safesearch, vqd=vqd)
+ query=urlencode({'q': query, 'l': region_code}), offset=offset, safesearch=safesearch, vqd=vqd
+ )
else:
- params['url'] = images_url.format(
- query=urlencode({'q': query}), offset=offset, safesearch=safesearch, vqd=vqd)
+ params['url'] = images_url.format(query=urlencode({'q': query}), offset=offset, safesearch=safesearch, vqd=vqd)
return params
@@ -84,11 +87,15 @@ def response(resp):
image = result['image']
# append result
- results.append({'template': 'images.html',
- 'title': title,
- 'content': '',
- 'thumbnail_src': thumbnail,
- 'img_src': image,
- 'url': url})
+ results.append(
+ {
+ 'template': 'images.html',
+ 'title': title,
+ 'content': '',
+ 'thumbnail_src': thumbnail,
+ 'img_src': image,
+ 'url': url,
+ }
+ )
return results
diff --git a/searx/engines/duden.py b/searx/engines/duden.py
index bc4211c67..600b61f3c 100644
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -38,7 +38,7 @@ def request(query, params):
pageno : 1 # number of the requested page
'''
- offset = (params['pageno'] - 1)
+ offset = params['pageno'] - 1
if offset == 0:
search_url_fmt = base_url + 'suchen/dudenonline/{query}'
params['url'] = search_url_fmt.format(query=quote(query))
@@ -58,9 +58,9 @@ def response(resp):
dom = html.fromstring(resp.text)
- number_of_results_element =\
- eval_xpath_getindex(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()',
- 0, default=None)
+ number_of_results_element = eval_xpath_getindex(
+ dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()', 0, default=None
+ )
if number_of_results_element is not None:
number_of_results_string = re.sub('[^0-9]', '', number_of_results_element)
results.append({'number_of_results': int(number_of_results_string)})
@@ -71,8 +71,6 @@ def response(resp):
title = eval_xpath(result, 'string(.//h2/a)').strip()
content = extract_text(eval_xpath(result, './/p'))
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content})
+ results.append({'url': url, 'title': title, 'content': content})
return results
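
The digit-stripping step in the hunk above, shown standalone with an assumed result-count string:

import re
number_of_results_element = 'Ungefähr 1.234 Treffer'   # assumed sample text from the results page
number_of_results_string = re.sub('[^0-9]', '', number_of_results_element)
int(number_of_results_string)                            # 1234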
diff --git a/searx/engines/dummy-offline.py b/searx/engines/dummy-offline.py
index cf2f75312..632eeb2b3 100644
--- a/searx/engines/dummy-offline.py
+++ b/searx/engines/dummy-offline.py
@@ -15,6 +15,8 @@ about = {
def search(query, request_params):
- return [{
- 'result': 'this is what you get',
- }]
+ return [
+ {
+ 'result': 'this is what you get',
+ }
+ ]
diff --git a/searx/engines/ebay.py b/searx/engines/ebay.py
index 45c633b42..b7aefcb44 100644
--- a/searx/engines/ebay.py
+++ b/searx/engines/ebay.py
@@ -58,16 +58,17 @@ def response(resp):
if title == "":
continue
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'price': price,
- 'shipping': shipping,
- 'source_country': source_country,
- 'thumbnail': thumbnail,
- 'template': 'products.html',
-
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'price': price,
+ 'shipping': shipping,
+ 'source_country': source_country,
+ 'thumbnail': thumbnail,
+ 'template': 'products.html',
+ }
+ )
return results
diff --git a/searx/engines/elasticsearch.py b/searx/engines/elasticsearch.py
index db84a5c13..f6e207b4d 100644
--- a/searx/engines/elasticsearch.py
+++ b/searx/engines/elasticsearch.py
@@ -119,9 +119,7 @@ def response(resp):
r['template'] = 'key-value.html'
if show_metadata:
- r['metadata'] = {'index': result['_index'],
- 'id': result['_id'],
- 'score': result['_score']}
+ r['metadata'] = {'index': result['_index'], 'id': result['_id'], 'score': result['_score']}
results.append(r)
@@ -133,12 +131,10 @@ _available_query_types = {
# https://www.elastic.co/guide/en/elasticsearch/reference/current/full-text-queries.html
'match': _match_query,
'simple_query_string': _simple_query_string_query,
-
# Term-level queries
# https://www.elastic.co/guide/en/elasticsearch/reference/current/term-level-queries.html
'term': _term_query,
'terms': _terms_query,
-
# Query JSON defined by the instance administrator.
'custom': _custom_query,
}
diff --git a/searx/engines/etools.py b/searx/engines/etools.py
index bf4f4ea1f..c66ceeb4b 100644
--- a/searx/engines/etools.py
+++ b/searx/engines/etools.py
@@ -22,10 +22,7 @@ paging = False
safesearch = True
base_url = 'https://www.etools.ch'
-search_path = '/searchAdvancedSubmit.do'\
- '?query={search_term}'\
- '&pageResults=20'\
- '&safeSearch={safesearch}'
+search_path = '/searchAdvancedSubmit.do' '?query={search_term}' '&pageResults=20' '&safeSearch={safesearch}'
def request(query, params):
@@ -49,8 +46,6 @@ def response(resp):
title = extract_text(eval_xpath(result, './a//text()'))
content = extract_text(eval_xpath(result, './/div[@class="text"]//text()'))
- results.append({'url': url,
- 'title': title,
- 'content': content})
+ results.append({'url': url, 'title': title, 'content': content})
return results
diff --git a/searx/engines/fdroid.py b/searx/engines/fdroid.py
index 8fff2e384..c381b25d4 100644
--- a/searx/engines/fdroid.py
+++ b/searx/engines/fdroid.py
@@ -42,13 +42,13 @@ def response(resp):
for app in dom.xpath('//a[@class="package-header"]'):
app_url = app.xpath('./@href')[0]
app_title = extract_text(app.xpath('./div/h4[@class="package-name"]/text()'))
- app_content = extract_text(app.xpath('./div/div/span[@class="package-summary"]')).strip() \
- + ' - ' + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
+ app_content = (
+ extract_text(app.xpath('./div/div/span[@class="package-summary"]')).strip()
+ + ' - '
+ + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
+ )
app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0]
- results.append({'url': app_url,
- 'title': app_title,
- 'content': app_content,
- 'img_src': app_img_src})
+ results.append({'url': app_url, 'title': app_title, 'content': app_content, 'img_src': app_img_src})
return results
diff --git a/searx/engines/flickr.py b/searx/engines/flickr.py
index b0ddf6224..b7cd76808 100644
--- a/searx/engines/flickr.py
+++ b/searx/engines/flickr.py
@@ -25,10 +25,12 @@ paging = True
api_key = None
-url = 'https://api.flickr.com/services/rest/?method=flickr.photos.search' +\
- '&api_key={api_key}&{text}&sort=relevance' +\
- '&extras=description%2C+owner_name%2C+url_o%2C+url_n%2C+url_z' +\
- '&per_page={nb_per_page}&format=json&nojsoncallback=1&page={page}'
+url = (
+ 'https://api.flickr.com/services/rest/?method=flickr.photos.search'
+ + '&api_key={api_key}&{text}&sort=relevance'
+ + '&extras=description%2C+owner_name%2C+url_o%2C+url_n%2C+url_z'
+ + '&per_page={nb_per_page}&format=json&nojsoncallback=1&page={page}'
+)
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
paging = True
@@ -39,10 +41,9 @@ def build_flickr_url(user_id, photo_id):
def request(query, params):
- params['url'] = url.format(text=urlencode({'text': query}),
- api_key=api_key,
- nb_per_page=nb_per_page,
- page=params['pageno'])
+ params['url'] = url.format(
+ text=urlencode({'text': query}), api_key=api_key, nb_per_page=nb_per_page, page=params['pageno']
+ )
return params
@@ -69,7 +70,7 @@ def response(resp):
else:
continue
-# For a bigger thumbnail, keep only the url_z, not the url_n
+ # For a bigger thumbnail, keep only the url_z, not the url_n
if 'url_n' in photo:
thumbnail_src = photo['url_n']
elif 'url_z' in photo:
@@ -80,13 +81,17 @@ def response(resp):
url = build_flickr_url(photo['owner'], photo['id'])
# append result
- results.append({'url': url,
- 'title': photo['title'],
- 'img_src': img_src,
- 'thumbnail_src': thumbnail_src,
- 'content': photo['description']['_content'],
- 'author': photo['ownername'],
- 'template': 'images.html'})
+ results.append(
+ {
+ 'url': url,
+ 'title': photo['title'],
+ 'img_src': img_src,
+ 'thumbnail_src': thumbnail_src,
+ 'content': photo['description']['_content'],
+ 'author': photo['ownername'],
+ 'template': 'images.html',
+ }
+ )
# return results
return results
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
index 1d670ee50..4ff59fc52 100644
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -30,10 +30,12 @@ image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
paging = True
time_range_support = True
-time_range_dict = {'day': 60 * 60 * 24,
- 'week': 60 * 60 * 24 * 7,
- 'month': 60 * 60 * 24 * 7 * 4,
- 'year': 60 * 60 * 24 * 7 * 52}
+time_range_dict = {
+ 'day': 60 * 60 * 24,
+ 'week': 60 * 60 * 24 * 7,
+ 'month': 60 * 60 * 24 * 7 * 4,
+ 'year': 60 * 60 * 24 * 7 * 52,
+}
def build_flickr_url(user_id, photo_id):
@@ -47,8 +49,9 @@ def _get_time_range_url(time_range):
def request(query, params):
- params['url'] = (search_url.format(query=urlencode({'text': query}), page=params['pageno'])
- + _get_time_range_url(params['time_range']))
+ params['url'] = search_url.format(query=urlencode({'text': query}), page=params['pageno']) + _get_time_range_url(
+ params['time_range']
+ )
return params
@@ -83,10 +86,9 @@ def response(resp):
for image_size in image_sizes:
if image_size in photo['sizes']:
img_src = photo['sizes'][image_size]['url']
- img_format = 'jpg ' \
- + str(photo['sizes'][image_size]['width']) \
- + 'x' \
- + str(photo['sizes'][image_size]['height'])
+ img_format = (
+ 'jpg ' + str(photo['sizes'][image_size]['width']) + 'x' + str(photo['sizes'][image_size]['height'])
+ )
break
if not img_src:
@@ -113,7 +115,7 @@ def response(resp):
'thumbnail_src': thumbnail_src,
'source': source,
'img_format': img_format,
- 'template': 'images.html'
+ 'template': 'images.html',
}
result['author'] = author.encode(errors='ignore').decode()
result['source'] = source.encode(errors='ignore').decode()
diff --git a/searx/engines/framalibre.py b/searx/engines/framalibre.py
index 42c08cf95..b2c9d9077 100644
--- a/searx/engines/framalibre.py
+++ b/searx/engines/framalibre.py
@@ -35,9 +35,8 @@ content_xpath = './/div[@class="content"]//p'
# do search-request
def request(query, params):
- offset = (params['pageno'] - 1)
- params['url'] = search_url.format(query=urlencode({'keys': query}),
- offset=offset)
+ offset = params['pageno'] - 1
+ params['url'] = search_url.format(query=urlencode({'keys': query}), offset=offset)
return params
@@ -63,10 +62,7 @@ def response(resp):
content = escape(extract_text(result.xpath(content_xpath)))
# append result
- results.append({'url': href,
- 'title': title,
- 'img_src': thumbnail,
- 'content': content})
+ results.append({'url': href, 'title': title, 'img_src': thumbnail, 'content': content})
# return results
return results
diff --git a/searx/engines/freesound.py b/searx/engines/freesound.py
index d2564946c..121a6a5b0 100644
--- a/searx/engines/freesound.py
+++ b/searx/engines/freesound.py
@@ -26,8 +26,7 @@ paging = True
# search url
url = "https://freesound.org/apiv2/"
search_url = (
- url
- + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}"
+ url + "search/text/?query={query}&page={page}&fields=name,url,download,created,description,type&token={api_key}"
)
embedded_url = '<audio controls><source src="{uri}" type="audio/{ftype}"></audio>'
diff --git a/searx/engines/frinkiac.py b/searx/engines/frinkiac.py
index f43bb6e20..95a1366de 100644
--- a/searx/engines/frinkiac.py
+++ b/searx/engines/frinkiac.py
@@ -10,10 +10,7 @@ from urllib.parse import urlencode
about = {
"website": 'https://frinkiac.com',
"wikidata_id": 'Q24882614',
- "official_api_documentation": {
- 'url': None,
- 'comment': 'see https://github.com/MitchellAW/CompuGlobal'
- },
+ "official_api_documentation": {'url': None, 'comment': 'see https://github.com/MitchellAW/CompuGlobal'},
"use_official_api": False,
"require_api_key": False,
"results": 'JSON',
@@ -40,12 +37,15 @@ def response(resp):
episode = result['Episode']
timestamp = result['Timestamp']
- results.append({'template': 'images.html',
- 'url': RESULT_URL.format(base=BASE,
- query=urlencode({'p': 'caption', 'e': episode, 't': timestamp})),
- 'title': episode,
- 'content': '',
- 'thumbnail_src': THUMB_URL.format(base=BASE, episode=episode, timestamp=timestamp),
- 'img_src': IMAGE_URL.format(base=BASE, episode=episode, timestamp=timestamp)})
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': RESULT_URL.format(base=BASE, query=urlencode({'p': 'caption', 'e': episode, 't': timestamp})),
+ 'title': episode,
+ 'content': '',
+ 'thumbnail_src': THUMB_URL.format(base=BASE, episode=episode, timestamp=timestamp),
+ 'img_src': IMAGE_URL.format(base=BASE, episode=episode, timestamp=timestamp),
+ }
+ )
return results
diff --git a/searx/engines/gentoo.py b/searx/engines/gentoo.py
index 325e132a6..5b9edafe0 100644
--- a/searx/engines/gentoo.py
+++ b/searx/engines/gentoo.py
@@ -37,15 +37,12 @@ def locale_to_lang_code(locale):
# wikis for some languages were moved off from the main site, we need to make
# requests to correct URLs to be able to get results in those languages
lang_urls = {
- 'en': {
- 'base': 'https://wiki.gentoo.org',
- 'search': '/index.php?title=Special:Search&offset={offset}&{query}'
- },
+ 'en': {'base': 'https://wiki.gentoo.org', 'search': '/index.php?title=Special:Search&offset={offset}&{query}'},
'others': {
'base': 'https://wiki.gentoo.org',
'search': '/index.php?title=Special:Search&offset={offset}&{query}\
- &profile=translation&languagefilter={language}'
- }
+ &profile=translation&languagefilter={language}',
+ },
}
@@ -78,7 +75,7 @@ main_langs = {
'sl': 'Slovenský',
'th': 'ไทย',
'uk': 'Українська',
- 'zh': '简体中文'
+ 'zh': '简体中文',
}
supported_languages = dict(lang_urls, **main_langs)
@@ -101,8 +98,7 @@ def request(query, params):
urls = get_lang_urls(language)
search_url = urls['base'] + urls['search']
- params['url'] = search_url.format(query=query, offset=offset,
- language=language)
+ params['url'] = search_url.format(query=query, offset=offset, language=language)
return params
@@ -123,7 +119,6 @@ def response(resp):
href = urljoin(base_url, link.attrib.get('href'))
title = extract_text(link)
- results.append({'url': href,
- 'title': title})
+ results.append({'url': href, 'title': title})
return results
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index 0f685abc5..c657dca30 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -55,12 +55,12 @@ def fetch_extra_param(query_args, headers):
extra_param_path = search_path + urlencode(query_args)
text = get(base_url + extra_param_path, headers=headers).text
- re_var= None
+ re_var = None
for line in text.splitlines():
if re_var is None and extra_param_path in line:
var = line.split("=")[0].split()[1] # e.g. var --> 'uxrl'
re_var = re.compile(var + "\\s*=\\s*" + var + "\\s*\\+\\s*'" + "(.*)" + "'(.*)")
- extra_param = line.split("'")[1][len(extra_param_path):]
+ extra_param = line.split("'")[1][len(extra_param_path) :]
continue
if re_var is not None and re_var.search(line):
extra_param += re_var.search(line).group(1)
@@ -69,12 +69,7 @@ def fetch_extra_param(query_args, headers):
# do search-request
def request(query, params): # pylint: disable=unused-argument
- query_args = dict(
- c = 'main'
- , q = query
- , dr = 1
- , showgoodimages = 0
- )
+ query_args = dict(c='main', q=query, dr=1, showgoodimages=0)
if params['language'] and params['language'] != 'all':
query_args['qlangcountry'] = params['language']
@@ -93,6 +88,7 @@ def request(query, params): # pylint: disable=unused-argument
return params
+
# get response from search-request
def response(resp):
results = []
@@ -125,10 +121,6 @@ def response(resp):
if len(subtitle) > 3 and subtitle != title:
title += " - " + subtitle
- results.append(dict(
- url = url
- , title = title
- , content = content
- ))
+ results.append(dict(url=url, title=title, content=content))
return results
diff --git a/searx/engines/github.py b/searx/engines/github.py
index b68caa350..1d12d296a 100644
--- a/searx/engines/github.py
+++ b/searx/engines/github.py
@@ -55,9 +55,7 @@ def response(resp):
content = ''
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content})
+ results.append({'url': url, 'title': title, 'content': content})
# return results
return results
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 578dec60c..685697d29 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -50,72 +50,63 @@ supported_languages_url = 'https://www.google.com/preferences?#languages'
# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
google_domains = {
- 'BG': 'google.bg', # Bulgaria
- 'CZ': 'google.cz', # Czech Republic
- 'DE': 'google.de', # Germany
- 'DK': 'google.dk', # Denmark
- 'AT': 'google.at', # Austria
- 'CH': 'google.ch', # Switzerland
- 'GR': 'google.gr', # Greece
+ 'BG': 'google.bg', # Bulgaria
+ 'CZ': 'google.cz', # Czech Republic
+ 'DE': 'google.de', # Germany
+ 'DK': 'google.dk', # Denmark
+ 'AT': 'google.at', # Austria
+ 'CH': 'google.ch', # Switzerland
+ 'GR': 'google.gr', # Greece
'AU': 'google.com.au', # Australia
- 'CA': 'google.ca', # Canada
- 'GB': 'google.co.uk', # United Kingdom
- 'ID': 'google.co.id', # Indonesia
- 'IE': 'google.ie', # Ireland
- 'IN': 'google.co.in', # India
+ 'CA': 'google.ca', # Canada
+ 'GB': 'google.co.uk', # United Kingdom
+ 'ID': 'google.co.id', # Indonesia
+ 'IE': 'google.ie', # Ireland
+ 'IN': 'google.co.in', # India
'MY': 'google.com.my', # Malaysia
- 'NZ': 'google.co.nz', # New Zealand
+ 'NZ': 'google.co.nz', # New Zealand
'PH': 'google.com.ph', # Philippines
'SG': 'google.com.sg', # Singapore
- 'US': 'google.com', # United States (google.us) redirects to .com
- 'ZA': 'google.co.za', # South Africa
+ 'US': 'google.com', # United States (google.us) redirects to .com
+ 'ZA': 'google.co.za', # South Africa
'AR': 'google.com.ar', # Argentina
- 'CL': 'google.cl', # Chile
- 'ES': 'google.es', # Spain
+ 'CL': 'google.cl', # Chile
+ 'ES': 'google.es', # Spain
'MX': 'google.com.mx', # Mexico
- 'EE': 'google.ee', # Estonia
- 'FI': 'google.fi', # Finland
- 'BE': 'google.be', # Belgium
- 'FR': 'google.fr', # France
- 'IL': 'google.co.il', # Israel
- 'HR': 'google.hr', # Croatia
- 'HU': 'google.hu', # Hungary
- 'IT': 'google.it', # Italy
- 'JP': 'google.co.jp', # Japan
- 'KR': 'google.co.kr', # South Korea
- 'LT': 'google.lt', # Lithuania
- 'LV': 'google.lv', # Latvia
- 'NO': 'google.no', # Norway
- 'NL': 'google.nl', # Netherlands
- 'PL': 'google.pl', # Poland
+ 'EE': 'google.ee', # Estonia
+ 'FI': 'google.fi', # Finland
+ 'BE': 'google.be', # Belgium
+ 'FR': 'google.fr', # France
+ 'IL': 'google.co.il', # Israel
+ 'HR': 'google.hr', # Croatia
+ 'HU': 'google.hu', # Hungary
+ 'IT': 'google.it', # Italy
+ 'JP': 'google.co.jp', # Japan
+ 'KR': 'google.co.kr', # South Korea
+ 'LT': 'google.lt', # Lithuania
+ 'LV': 'google.lv', # Latvia
+ 'NO': 'google.no', # Norway
+ 'NL': 'google.nl', # Netherlands
+ 'PL': 'google.pl', # Poland
'BR': 'google.com.br', # Brazil
- 'PT': 'google.pt', # Portugal
- 'RO': 'google.ro', # Romania
- 'RU': 'google.ru', # Russia
- 'SK': 'google.sk', # Slovakia
- 'SI': 'google.si', # Slovenia
- 'SE': 'google.se', # Sweden
- 'TH': 'google.co.th', # Thailand
+ 'PT': 'google.pt', # Portugal
+ 'RO': 'google.ro', # Romania
+ 'RU': 'google.ru', # Russia
+ 'SK': 'google.sk', # Slovakia
+ 'SI': 'google.si', # Slovenia
+ 'SE': 'google.se', # Sweden
+ 'TH': 'google.co.th', # Thailand
'TR': 'google.com.tr', # Turkey
'UA': 'google.com.ua', # Ukraine
'CN': 'google.com.hk', # There is no google.cn, we use .com.hk for zh-CN
'HK': 'google.com.hk', # Hong Kong
- 'TW': 'google.com.tw' # Taiwan
+ 'TW': 'google.com.tw', # Taiwan
}
-time_range_dict = {
- 'day': 'd',
- 'week': 'w',
- 'month': 'm',
- 'year': 'y'
-}
+time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
# Filter results. 0: None, 1: Moderate, 2: Strict
-filter_mapping = {
- 0: 'off',
- 1: 'medium',
- 2: 'high'
-}
+filter_mapping = {0: 'off', 1: 'medium', 2: 'high'}
# specific xpath variables
# ------------------------
@@ -140,6 +131,7 @@ content_xpath = './/div[@class="IsZvec"]'
# from the links not the links itself.
suggestion_xpath = '//div[contains(@class, "EIaa9b")]//a'
+
def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
"""Composing various language properties for the google engines.
@@ -184,11 +176,11 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
request's headers)
"""
ret_val = {
- 'language' : None,
- 'country' : None,
- 'subdomain' : None,
- 'params' : {},
- 'headers' : {},
+ 'language': None,
+ 'country': None,
+ 'subdomain': None,
+ 'params': {},
+ 'headers': {},
}
# language ...
@@ -213,7 +205,7 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
# subdomain ...
- ret_val['subdomain'] = 'www.' + google_domains.get(country.upper(), 'google.com')
+ ret_val['subdomain'] = 'www.' + google_domains.get(country.upper(), 'google.com')
# params & headers
@@ -250,15 +242,18 @@ def get_lang_info(params, lang_list, custom_aliases, supported_any_language):
ret_val['params']['lr'] = "lang_" + lang_list.get(lang_country, language)
# Accept-Language: fr-CH, fr;q=0.8, en;q=0.6, *;q=0.5
- ret_val['headers']['Accept-Language'] = ','.join([
- lang_country,
- language + ';q=0.8,',
- 'en;q=0.6',
- '*;q=0.5',
- ])
+ ret_val['headers']['Accept-Language'] = ','.join(
+ [
+ lang_country,
+ language + ';q=0.8,',
+ 'en;q=0.6',
+ '*;q=0.5',
+ ]
+ )
return ret_val
+
def detect_google_sorry(resp):
if resp.url.host == 'sorry.google.com' or resp.url.path.startswith('/sorry'):
raise SearxEngineCaptchaException()
@@ -269,9 +264,7 @@ def request(query, params):
offset = (params['pageno'] - 1) * 10
- lang_info = get_lang_info(
- params, supported_languages, language_aliases, True
- )
+ lang_info = get_lang_info(params, supported_languages, language_aliases, True)
additional_parameters = {}
if use_mobile_ui:
@@ -281,15 +274,23 @@ def request(query, params):
}
# https://www.google.de/search?q=corona&hl=de&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
- query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
- 'q': query,
- **lang_info['params'],
- 'ie': "utf8",
- 'oe': "utf8",
- 'start': offset,
- 'filter': '0',
- **additional_parameters,
- })
+ query_url = (
+ 'https://'
+ + lang_info['subdomain']
+ + '/search'
+ + "?"
+ + urlencode(
+ {
+ 'q': query,
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ 'start': offset,
+ 'filter': '0',
+ **additional_parameters,
+ }
+ )
+ )
if params['time_range'] in time_range_dict:
query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
@@ -301,9 +302,7 @@ def request(query, params):
if use_mobile_ui:
params['headers']['Accept'] = '*/*'
else:
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
return params
@@ -325,7 +324,7 @@ def response(resp):
else:
logger.debug("did not find 'answer'")
- # results --> number_of_results
+ # results --> number_of_results
if not use_mobile_ui:
try:
_txt = eval_xpath_getindex(dom, '//div[@id="result-stats"]//text()', 0)
@@ -355,11 +354,7 @@ def response(resp):
if url is None:
continue
content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
- results.append({
- 'url': url,
- 'title': title,
- 'content': content
- })
+ results.append({'url': url, 'title': title, 'content': content})
except Exception as e: # pylint: disable=broad-except
logger.error(e, exc_info=True)
# from lxml import etree
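
A minimal sketch of the query_url composition pattern used in the request() hunk above, with assumed example values standing in for lang_info:

from urllib.parse import urlencode

subdomain = 'www.google.de'                    # assumed lang_info['subdomain']
lang_params = {'hl': 'de', 'lr': 'lang_de'}    # assumed lang_info['params']
query_url = (
    'https://'
    + subdomain
    + '/search'
    + '?'
    + urlencode({'q': 'corona', **lang_params, 'ie': 'utf8', 'oe': 'utf8', 'start': 0, 'filter': '0'})
)
# further parameters such as the time range are appended afterwards, already urlencoded
query_url += '&' + urlencode({'tbs': 'qdr:' + 'd'})   # -> ...&tbs=qdr%3Ad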
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 61d291e3f..203df404a 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -30,10 +30,8 @@ from searx.engines.google import (
)
# pylint: disable=unused-import
-from searx.engines.google import (
- supported_languages_url
- , _fetch_supported_languages
-)
+from searx.engines.google import supported_languages_url, _fetch_supported_languages
+
# pylint: enable=unused-import
# about
@@ -53,21 +51,16 @@ use_locale_domain = True
time_range_support = True
safesearch = True
-filter_mapping = {
- 0: 'images',
- 1: 'active',
- 2: 'active'
-}
+filter_mapping = {0: 'images', 1: 'active', 2: 'active'}
def scrap_out_thumbs(dom):
- """Scrap out thumbnail data from <script> tags.
- """
+ """Scrap out thumbnail data from <script> tags."""
ret_val = {}
for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
_script = script.text
# _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
- _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
+ _thumb_no, _img_data = _script[len("_setImgSrc(") : -2].split(",", 1)
_thumb_no = _thumb_no.replace("'", "")
_img_data = _img_data.replace("'", "")
_img_data = _img_data.replace(r"\/", r"/")
@@ -76,8 +69,7 @@ def scrap_out_thumbs(dom):
def scrap_img_by_id(script, data_id):
- """Get full image URL by data-id in parent element
- """
+ """Get full image URL by data-id in parent element"""
img_url = ''
_script = script.split('\n')
for i, line in enumerate(_script):
@@ -91,20 +83,25 @@ def scrap_img_by_id(script, data_id):
def request(query, params):
"""Google-Video search request"""
- lang_info = get_lang_info(
- params, supported_languages, language_aliases, False
+ lang_info = get_lang_info(params, supported_languages, language_aliases, False)
+ logger.debug("HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
+
+ query_url = (
+ 'https://'
+ + lang_info['subdomain']
+ + '/search'
+ + "?"
+ + urlencode(
+ {
+ 'q': query,
+ 'tbm': "isch",
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ 'num': 30,
+ }
+ )
)
- logger.debug(
- "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
-
- query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
- 'q': query,
- 'tbm': "isch",
- **lang_info['params'],
- 'ie': "utf8",
- 'oe': "utf8",
- 'num': 30,
- })
if params['time_range'] in time_range_dict:
query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
@@ -113,9 +110,7 @@ def request(query, params):
params['url'] = query_url
params['headers'].update(lang_info['headers'])
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
return params
@@ -128,8 +123,7 @@ def response(resp):
# convert the text to dom
dom = html.fromstring(resp.text)
img_bas64_map = scrap_out_thumbs(dom)
- img_src_script = eval_xpath_getindex(
- dom, '//script[contains(., "AF_initDataCallback({key: ")]', 1).text
+ img_src_script = eval_xpath_getindex(dom, '//script[contains(., "AF_initDataCallback({key: ")]', 1).text
# parse results
#
@@ -189,15 +183,17 @@ def response(resp):
if not src_url:
src_url = thumbnail_src
- results.append({
- 'url': url,
- 'title': img_alt,
- 'content': pub_descr,
- 'source': pub_source,
- 'img_src': src_url,
- # 'img_format': img_format,
- 'thumbnail_src': thumbnail_src,
- 'template': 'images.html'
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': img_alt,
+ 'content': pub_descr,
+ 'source': pub_source,
+ 'img_src': src_url,
+ # 'img_format': img_format,
+ 'thumbnail_src': thumbnail_src,
+ 'template': 'images.html',
+ }
+ )
return results
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 87ac9a19d..162e4348e 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -32,6 +32,7 @@ from searx.engines.google import (
supported_languages_url,
_fetch_supported_languages,
)
+
# pylint: enable=unused-import
from searx.engines.google import (
@@ -71,14 +72,12 @@ time_range_support = True
# safesearch : results are identitical for safesearch=0 and safesearch=2
safesearch = False
+
def request(query, params):
"""Google-News search request"""
- lang_info = get_lang_info(
- params, supported_languages, language_aliases, False
- )
- logger.debug(
- "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
+ lang_info = get_lang_info(params, supported_languages, language_aliases, False)
+ logger.debug("HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
# google news has only one domain
lang_info['subdomain'] = 'news.google.com'
@@ -94,19 +93,26 @@ def request(query, params):
if params['time_range']:
query += ' ' + time_range_dict[params['time_range']]
- query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
- 'q': query,
- **lang_info['params'],
- 'ie': "utf8",
- 'oe': "utf8",
- 'gl': lang_info['country'],
- }) + ('&ceid=%s' % ceid) # ceid includes a ':' character which must not be urlencoded
+ query_url = (
+ 'https://'
+ + lang_info['subdomain']
+ + '/search'
+ + "?"
+ + urlencode(
+ {
+ 'q': query,
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ 'gl': lang_info['country'],
+ }
+ )
+ + ('&ceid=%s' % ceid)
+ ) # ceid includes a ':' character which must not be urlencoded
params['url'] = query_url
params['headers'].update(lang_info['headers'])
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
params['headers']['Cookie'] = "CONSENT=YES+cb.%s-14-p0.en+F+941;" % datetime.now().strftime("%Y%m%d")
return params
@@ -141,7 +147,7 @@ def response(resp):
# jslog="95014; 5:W251bGwsbnVsbCxudW...giXQ==; track:click"
jslog = jslog.split(";")[1].split(':')[1].strip()
try:
- padding = (4 -(len(jslog) % 4)) * "="
+ padding = (4 - (len(jslog) % 4)) * "="
jslog = b64decode(jslog + padding)
except binascii.Error:
# URL cant be read, skip this result
@@ -178,12 +184,14 @@ def response(resp):
img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'img_src': img_src,
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'img_src': img_src,
+ }
+ )
# return results
return results
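
The jslog padding step in the hunk above, as a standalone sketch with an assumed token:

from base64 import b64decode
import binascii

jslog = 'aGVsbG8'                          # assumed unpadded base64 token (decodes to b'hello')
padding = (4 - (len(jslog) % 4)) * '='     # base64 input length must be a multiple of 4
try:
    decoded = b64decode(jslog + padding)   # b'hello'
except binascii.Error:
    decoded = None                         # URL cannot be recovered; the result would be skipped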
diff --git a/searx/engines/google_scholar.py b/searx/engines/google_scholar.py
index e6726463d..e0700957c 100644
--- a/searx/engines/google_scholar.py
+++ b/searx/engines/google_scholar.py
@@ -32,6 +32,7 @@ from searx.engines.google import (
supported_languages_url,
_fetch_supported_languages,
)
+
# pylint: enable=unused-import
# about
@@ -52,6 +53,7 @@ use_locale_domain = True
time_range_support = True
safesearch = False
+
def time_range_url(params):
"""Returns a URL query component for a google-Scholar time range based on
``params['time_range']``. Google-Scholar does only support ranges in years.
@@ -64,7 +66,7 @@ def time_range_url(params):
# as_ylo=2016&as_yhi=2019
ret_val = ''
if params['time_range'] in time_range_dict:
- ret_val= urlencode({'as_ylo': datetime.now().year -1 })
+ ret_val = urlencode({'as_ylo': datetime.now().year - 1})
return '&' + ret_val
@@ -72,34 +74,38 @@ def request(query, params):
"""Google-Scholar search request"""
offset = (params['pageno'] - 1) * 10
- lang_info = get_lang_info(
- params, supported_languages, language_aliases, False
- )
- logger.debug(
- "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
+ lang_info = get_lang_info(params, supported_languages, language_aliases, False)
+ logger.debug("HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
# subdomain is: scholar.google.xy
lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")
- query_url = 'https://'+ lang_info['subdomain'] + '/scholar' + "?" + urlencode({
- 'q': query,
- **lang_info['params'],
- 'ie': "utf8",
- 'oe': "utf8",
- 'start' : offset,
- })
+ query_url = (
+ 'https://'
+ + lang_info['subdomain']
+ + '/scholar'
+ + "?"
+ + urlencode(
+ {
+ 'q': query,
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ 'start': offset,
+ }
+ )
+ )
query_url += time_range_url(params)
params['url'] = query_url
params['headers'].update(lang_info['headers'])
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- #params['google_subdomain'] = subdomain
+ # params['google_subdomain'] = subdomain
return params
+
def response(resp):
"""Get response from google's search request"""
results = []
@@ -132,11 +138,13 @@ def response(resp):
if pub_type:
title = title + " " + pub_type
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ }
+ )
# parse suggestion
for suggestion in eval_xpath(dom, '//div[contains(@class, "gs_qsuggest_wrap")]//li//a'):
diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py
index 77b0ab260..049f9138c 100644
--- a/searx/engines/google_videos.py
+++ b/searx/engines/google_videos.py
@@ -38,10 +38,8 @@ from searx.engines.google import (
)
# pylint: disable=unused-import
-from searx.engines.google import (
- supported_languages_url
- , _fetch_supported_languages
-)
+from searx.engines.google import supported_languages_url, _fetch_supported_languages
+
# pylint: enable=unused-import
# about
@@ -65,6 +63,7 @@ safesearch = True
RE_CACHE = {}
+
def _re(regexpr):
"""returns compiled regular expression"""
RE_CACHE[regexpr] = RE_CACHE.get(regexpr, re.compile(regexpr))
@@ -77,18 +76,17 @@ def scrap_out_thumbs_src(dom):
for script in eval_xpath_list(dom, '//script[contains(., "google.ldi={")]'):
_script = script.text
# "dimg_35":"https://i.ytimg.c....",
- _dimurl = _re("s='([^']*)").findall( _script)
- for k,v in _re('(' + thumb_name + '[0-9]*)":"(http[^"]*)' ).findall(_script):
- v = v.replace(r'\u003d','=')
- v = v.replace(r'\u0026','&')
+ _dimurl = _re("s='([^']*)").findall(_script)
+ for k, v in _re('(' + thumb_name + '[0-9]*)":"(http[^"]*)').findall(_script):
+ v = v.replace(r'\u003d', '=')
+ v = v.replace(r'\u0026', '&')
ret_val[k] = v
logger.debug("found %s imgdata for: %s", thumb_name, ret_val.keys())
return ret_val
def scrap_out_thumbs(dom):
- """Scrap out thumbnail data from <script> tags.
- """
+ """Scrap out thumbnail data from <script> tags."""
ret_val = {}
thumb_name = 'dimg_'
@@ -96,7 +94,7 @@ def scrap_out_thumbs(dom):
_script = script.text
# var s='data:image/jpeg;base64, ...'
- _imgdata = _re("s='([^']*)").findall( _script)
+ _imgdata = _re("s='([^']*)").findall(_script)
if not _imgdata:
continue
@@ -112,19 +110,24 @@ def scrap_out_thumbs(dom):
def request(query, params):
"""Google-Video search request"""
- lang_info = get_lang_info(
- params, supported_languages, language_aliases, False
+ lang_info = get_lang_info(params, supported_languages, language_aliases, False)
+ logger.debug("HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
+
+ query_url = (
+ 'https://'
+ + lang_info['subdomain']
+ + '/search'
+ + "?"
+ + urlencode(
+ {
+ 'q': query,
+ 'tbm': "vid",
+ **lang_info['params'],
+ 'ie': "utf8",
+ 'oe': "utf8",
+ }
+ )
)
- logger.debug(
- "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
-
- query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
- 'q': query,
- 'tbm': "vid",
- **lang_info['params'],
- 'ie': "utf8",
- 'oe': "utf8",
- })
if params['time_range'] in time_range_dict:
query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
@@ -133,9 +136,7 @@ def request(query, params):
params['url'] = query_url
params['headers'].update(lang_info['headers'])
- params['headers']['Accept'] = (
- 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
- )
+ params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
return params
@@ -171,21 +172,22 @@ def response(resp):
title = extract_text(eval_xpath_getindex(result, title_xpath, 0))
url = eval_xpath_getindex(result, './/div[@class="dXiKIc"]//a/@href', 0)
- length = extract_text(eval_xpath(
- result, './/div[contains(@class, "P7xzyf")]/span/span'))
+ length = extract_text(eval_xpath(result, './/div[contains(@class, "P7xzyf")]/span/span'))
c_node = eval_xpath_getindex(result, './/div[@class="Uroaid"]', 0)
content = extract_text(c_node)
pub_info = extract_text(eval_xpath(result, './/div[@class="Zg1NU"]'))
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'length': length,
- 'author': pub_info,
- 'thumbnail': img_src,
- 'template': 'videos.html',
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'length': length,
+ 'author': pub_info,
+ 'thumbnail': img_src,
+ 'template': 'videos.html',
+ }
+ )
# parse suggestion
for suggestion in eval_xpath_list(dom, suggestion_xpath):
diff --git a/searx/engines/imdb.py b/searx/engines/imdb.py
index a7474fd5b..bb6258cf4 100644
--- a/searx/engines/imdb.py
+++ b/searx/engines/imdb.py
@@ -27,7 +27,9 @@ about = {
"results": 'HTML',
}
-categories = ['general', ]
+categories = [
+ 'general',
+]
paging = False
# suggestion_url = "https://sg.media-imdb.com/suggestion/{letter}/{query}.json"
@@ -35,13 +37,7 @@ suggestion_url = "https://v2.sg.media-imdb.com/suggestion/{letter}/{query}.json"
href_base = 'https://imdb.com/{category}/{entry_id}'
-search_categories = {
- "nm": "name",
- "tt": "title",
- "kw": "keyword",
- "co": "company",
- "ep": "episode"
-}
+search_categories = {"nm": "name", "tt": "title", "kw": "keyword", "co": "company", "ep": "episode"}
def request(query, params):
@@ -63,9 +59,7 @@ def response(resp):
entry_id = entry['id']
categ = search_categories.get(entry_id[:2])
if categ is None:
- logger.error(
- 'skip unknown category tag %s in %s', entry_id[:2], entry_id
- )
+ logger.error('skip unknown category tag %s in %s', entry_id[:2], entry_id)
continue
title = entry['l']
@@ -95,11 +89,13 @@ def response(resp):
if not image_url_name.endswith('_V1_'):
magic = '_V1_' + magic
image_url = image_url_name + magic + '.' + image_url_prefix
- results.append({
- "title": title,
- "url": href_base.format(category=categ, entry_id=entry_id),
- "content": content,
- "img_src" : image_url,
- })
+ results.append(
+ {
+ "title": title,
+ "url": href_base.format(category=categ, entry_id=entry_id),
+ "content": content,
+ "img_src": image_url,
+ }
+ )
return results
diff --git a/searx/engines/ina.py b/searx/engines/ina.py
index 81172ef8c..1e21bcef8 100644
--- a/searx/engines/ina.py
+++ b/searx/engines/ina.py
@@ -41,9 +41,7 @@ content_xpath = './/p[@class="media-body__summary"]'
# do search-request
def request(query, params):
- params['url'] = search_url.format(ps=page_size,
- start=params['pageno'] * page_size,
- query=urlencode({'q': query}))
+ params['url'] = search_url.format(ps=page_size, start=params['pageno'] * page_size, query=urlencode({'q': query}))
return params
@@ -75,12 +73,16 @@ def response(resp):
content = extract_text(result.xpath(content_xpath))
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'template': 'videos.html',
- 'publishedDate': publishedDate,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'template': 'videos.html',
+ 'publishedDate': publishedDate,
+ 'thumbnail': thumbnail,
+ }
+ )
# return results
return results
diff --git a/searx/engines/invidious.py b/searx/engines/invidious.py
index 5d0b00edb..914615d6f 100644
--- a/searx/engines/invidious.py
+++ b/searx/engines/invidious.py
@@ -46,14 +46,10 @@ def request(query, params):
base_url_rand = base_url
search_url = base_url_rand + "api/v1/search?q={query}"
- params["url"] = search_url.format(
- query=quote_plus(query)
- ) + "&page={pageno}".format(pageno=params["pageno"])
+ params["url"] = search_url.format(query=quote_plus(query)) + "&page={pageno}".format(pageno=params["pageno"])
if params["time_range"] in time_range_dict:
- params["url"] += "&date={timerange}".format(
- timerange=time_range_dict[params["time_range"]]
- )
+ params["url"] += "&date={timerange}".format(timerange=time_range_dict[params["time_range"]])
if params["language"] != "all":
lang = params["language"].split("-")
@@ -88,17 +84,13 @@ def response(resp):
url = base_invidious_url + videoid
embedded = embedded_url.format(videoid=videoid)
thumbs = result.get("videoThumbnails", [])
- thumb = next(
- (th for th in thumbs if th["quality"] == "sddefault"), None
- )
+ thumb = next((th for th in thumbs if th["quality"] == "sddefault"), None)
if thumb:
thumbnail = thumb.get("url", "")
else:
thumbnail = ""
- publishedDate = parser.parse(
- time.ctime(result.get("published", 0))
- )
+ publishedDate = parser.parse(time.ctime(result.get("published", 0)))
length = time.gmtime(result.get("lengthSeconds"))
if length.tm_hour:
length = time.strftime("%H:%M:%S", length)
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index 8a04d34b2..f53bc0bf4 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -119,22 +119,22 @@ def response(resp):
content = query(result, content_query)[0]
except:
content = ""
- results.append({
- 'url': to_string(url),
- 'title': title_filter(to_string(title)),
- 'content': content_filter(to_string(content)),
- })
+ results.append(
+ {
+ 'url': to_string(url),
+ 'title': title_filter(to_string(title)),
+ 'content': content_filter(to_string(content)),
+ }
+ )
else:
- for url, title, content in zip(
- query(json, url_query),
- query(json, title_query),
- query(json, content_query)
- ):
- results.append({
- 'url': to_string(url),
- 'title': title_filter(to_string(title)),
- 'content': content_filter(to_string(content)),
- })
+ for url, title, content in zip(query(json, url_query), query(json, title_query), query(json, content_query)):
+ results.append(
+ {
+ 'url': to_string(url),
+ 'title': title_filter(to_string(title)),
+ 'content': content_filter(to_string(content)),
+ }
+ )
if not suggestion_query:
return results
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
index ad451dbb1..26364674c 100644
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -34,8 +34,7 @@ content_xpath = './/span[@class="font11px lightgrey block"]'
# do search-request
def request(query, params):
- params['url'] = search_url.format(search_term=quote(query),
- pageno=params['pageno'])
+ params['url'] = search_url.format(search_term=quote(query), pageno=params['pageno'])
return params
@@ -79,16 +78,20 @@ def response(resp):
torrentfileurl = quote(torrentfile, safe="%/:=&?~#+!$,;'@()*")
# append result
- results.append({'url': href,
- 'title': title,
- 'content': content,
- 'seed': seed,
- 'leech': leech,
- 'filesize': filesize,
- 'files': files,
- 'magnetlink': magnetlink,
- 'torrentfile': torrentfileurl,
- 'template': 'torrent.html'})
+ results.append(
+ {
+ 'url': href,
+ 'title': title,
+ 'content': content,
+ 'seed': seed,
+ 'leech': leech,
+ 'filesize': filesize,
+ 'files': files,
+ 'magnetlink': magnetlink,
+ 'torrentfile': torrentfileurl,
+ 'template': 'torrent.html',
+ }
+ )
# return results sorted by seeder
return sorted(results, key=itemgetter('seed'), reverse=True)
diff --git a/searx/engines/loc.py b/searx/engines/loc.py
index 5c09ceff2..0b2f3a689 100644
--- a/searx/engines/loc.py
+++ b/searx/engines/loc.py
@@ -34,9 +34,7 @@ IMG_SRC_FIXES = {
def request(query, params):
- search_path = search_string.format(
- query=urlencode({'q': query}),
- page=params['pageno'])
+ search_path = search_string.format(query=urlencode({'q': query}), page=params['pageno'])
params['url'] = base_url + search_path
@@ -56,13 +54,15 @@ def response(resp):
break
else:
img_src = result['image']['thumb']
- results.append({
- 'url': result['links']['item'],
- 'title': result['title'],
- 'img_src': img_src,
- 'thumbnail_src': result['image']['thumb'],
- 'author': result['creator'],
- 'template': 'images.html'
- })
+ results.append(
+ {
+ 'url': result['links']['item'],
+ 'title': result['title'],
+ 'img_src': img_src,
+ 'thumbnail_src': result['image']['thumb'],
+ 'author': result['creator'],
+ 'template': 'images.html',
+ }
+ )
return results
diff --git a/searx/engines/mediathekviewweb.py b/searx/engines/mediathekviewweb.py
index d4cb853d4..991dcbc7b 100644
--- a/searx/engines/mediathekviewweb.py
+++ b/searx/engines/mediathekviewweb.py
@@ -22,29 +22,33 @@ paging = True
time_range_support = False
safesearch = False
+
def request(query, params):
params['url'] = 'https://mediathekviewweb.de/api/query'
params['method'] = 'POST'
params['headers']['Content-type'] = 'text/plain'
- params['data'] = dumps({
- 'queries' : [
- {
- 'fields' : [
- 'title',
- 'topic',
- ],
- 'query' : query
- },
- ],
- 'sortBy' : 'timestamp',
- 'sortOrder' : 'desc',
- 'future' : True,
- 'offset' : (params['pageno'] - 1 )* 10,
- 'size' : 10
- })
+ params['data'] = dumps(
+ {
+ 'queries': [
+ {
+ 'fields': [
+ 'title',
+ 'topic',
+ ],
+ 'query': query,
+ },
+ ],
+ 'sortBy': 'timestamp',
+ 'sortOrder': 'desc',
+ 'future': True,
+ 'offset': (params['pageno'] - 1) * 10,
+ 'size': 10,
+ }
+ )
return params
+
def response(resp):
resp = loads(resp.text)
@@ -58,11 +62,13 @@ def response(resp):
item['hms'] = str(datetime.timedelta(seconds=item['duration']))
- results.append({
- 'url' : item['url_video_hd'],
- 'title' : "%(channel)s: %(title)s (%(hms)s)" % item,
- 'length' : item['hms'],
- 'content' : "%(description)s" % item,
- })
+ results.append(
+ {
+ 'url': item['url_video_hd'],
+ 'title': "%(channel)s: %(title)s (%(hms)s)" % item,
+ 'length': item['hms'],
+ 'content': "%(description)s" % item,
+ }
+ )
return results
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
index da4321250..9002e9ba7 100644
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -25,23 +25,24 @@ search_type = 'nearmatch' # possible values: title, text, nearmatch
# search-url
base_url = 'https://{language}.wikipedia.org/'
-search_postfix = 'w/api.php?action=query'\
- '&list=search'\
- '&{query}'\
- '&format=json'\
- '&sroffset={offset}'\
- '&srlimit={limit}'\
+search_postfix = (
+ 'w/api.php?action=query'
+ '&list=search'
+ '&{query}'
+ '&format=json'
+ '&sroffset={offset}'
+ '&srlimit={limit}'
'&srwhat={searchtype}'
+)
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
- string_args = dict(query=urlencode({'srsearch': query}),
- offset=offset,
- limit=number_of_results,
- searchtype=search_type)
+ string_args = dict(
+ query=urlencode({'srsearch': query}), offset=offset, limit=number_of_results, searchtype=search_type
+ )
format_strings = list(Formatter().parse(base_url))
@@ -78,13 +79,14 @@ def response(resp):
for result in search_results['query']['search']:
if result.get('snippet', '').startswith('#REDIRECT'):
continue
- url = base_url.format(language=resp.search_params['language']) +\
- 'wiki/' + quote(result['title'].replace(' ', '_').encode())
+ url = (
+ base_url.format(language=resp.search_params['language'])
+ + 'wiki/'
+ + quote(result['title'].replace(' ', '_').encode())
+ )
# append result
- results.append({'url': url,
- 'title': result['title'],
- 'content': ''})
+ results.append({'url': url, 'title': result['title'], 'content': ''})
# return results
return results
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py
index c99611049..a869daf2f 100644
--- a/searx/engines/microsoft_academic.py
+++ b/searx/engines/microsoft_academic.py
@@ -26,17 +26,19 @@ def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['content-type'] = 'application/json; charset=utf-8'
- params['data'] = dumps({
- 'query': query,
- 'queryExpression': '',
- 'filters': [],
- 'orderBy': 0,
- 'skip': (params['pageno'] - 1) * 10,
- 'sortAscending': True,
- 'take': 10,
- 'includeCitationContexts': False,
- 'profileId': '',
- })
+ params['data'] = dumps(
+ {
+ 'query': query,
+ 'queryExpression': '',
+ 'filters': [],
+ 'orderBy': 0,
+ 'skip': (params['pageno'] - 1) * 10,
+ 'sortAscending': True,
+ 'take': 10,
+ 'includeCitationContexts': False,
+ 'profileId': '',
+ }
+ )
return params
@@ -54,11 +56,13 @@ def response(resp):
title = result['paper']['dn']
content = _get_content(result['paper'])
url = _paper_url.format(id=result['paper']['id'])
- results.append({
- 'url': url,
- 'title': html_to_text(title),
- 'content': html_to_text(content),
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': html_to_text(title),
+ 'content': html_to_text(content),
+ }
+ )
return results
diff --git a/searx/engines/mixcloud.py b/searx/engines/mixcloud.py
index a6fd1c0a1..f5e0f55fc 100644
--- a/searx/engines/mixcloud.py
+++ b/searx/engines/mixcloud.py
@@ -25,16 +25,17 @@ paging = True
url = 'https://api.mixcloud.com/'
search_url = url + 'search/?{query}&type=cloudcast&limit=10&offset={offset}'
-embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
- 'data-src="https://www.mixcloud.com/widget/iframe/?feed={url}" width="300" height="300"></iframe>'
+embedded_url = (
+ '<iframe scrolling="no" frameborder="0" allowTransparency="true" '
+ + 'data-src="https://www.mixcloud.com/widget/iframe/?feed={url}" width="300" height="300"></iframe>'
+)
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10
- params['url'] = search_url.format(query=urlencode({'q': query}),
- offset=offset)
+ params['url'] = search_url.format(query=urlencode({'q': query}), offset=offset)
return params
@@ -54,11 +55,9 @@ def response(resp):
publishedDate = parser.parse(result['created_time'])
# append result
- results.append({'url': url,
- 'title': title,
- 'embedded': embedded,
- 'publishedDate': publishedDate,
- 'content': content})
+ results.append(
+ {'url': url, 'title': title, 'embedded': embedded, 'publishedDate': publishedDate, 'content': content}
+ )
# return results
return results
diff --git a/searx/engines/mongodb.py b/searx/engines/mongodb.py
index 2ebb90539..c833ca9e0 100644
--- a/searx/engines/mongodb.py
+++ b/searx/engines/mongodb.py
@@ -26,38 +26,35 @@ result_template = 'key-value.html'
_client = None
+
def init(_):
connect()
+
def connect():
global _client # pylint: disable=global-statement
- kwargs = { 'port': port }
+ kwargs = {'port': port}
if username:
kwargs['username'] = username
if password:
kwargs['password'] = password
_client = MongoClient(host, **kwargs)[database][collection]
+
def search(query, params):
results = []
if exact_match_only:
- q = { '$eq': query }
+ q = {'$eq': query}
else:
- _re = re.compile('.*{0}.*'.format(re.escape(query)), re.I | re.M )
- q = { '$regex': _re }
+ _re = re.compile('.*{0}.*'.format(re.escape(query)), re.I | re.M)
+ q = {'$regex': _re}
- query = _client.find(
- {key: q}
- ).skip(
- ( params['pageno'] -1 ) * results_per_page
- ).limit(
- results_per_page
- )
+ query = _client.find({key: q}).skip((params['pageno'] - 1) * results_per_page).limit(results_per_page)
- results.append({ 'number_of_results': query.count() })
+ results.append({'number_of_results': query.count()})
for r in query:
del r['_id']
- r = { str(k):str(v) for k,v in r.items() }
+ r = {str(k): str(v) for k, v in r.items()}
r['template'] = result_template
results.append(r)
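
A minimal sketch of the regex query and skip/limit paging used in the hunk above; connection details and settings are assumed for illustration only:

import re
from pymongo import MongoClient

key, results_per_page, pageno = 'title', 20, 2                 # assumed engine settings
collection = MongoClient('localhost', port=27017)['mydb']['mycol']   # assumed host/database/collection

_re = re.compile('.*{0}.*'.format(re.escape('search term')), re.I | re.M)
cursor = collection.find({key: {'$regex': _re}}).skip((pageno - 1) * results_per_page).limit(results_per_page)
# page 2 skips the first 20 matches and returns at most 20 documents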
diff --git a/searx/engines/mysql_server.py b/searx/engines/mysql_server.py
index be89eb86e..d949ee0bc 100644
--- a/searx/engines/mysql_server.py
+++ b/searx/engines/mysql_server.py
@@ -20,6 +20,7 @@ paging = True
result_template = 'key-value.html'
_connection = None
+
def init(engine_settings):
global _connection # pylint: disable=global-statement
@@ -30,13 +31,14 @@ def init(engine_settings):
raise ValueError('only SELECT query is supported')
_connection = mysql.connector.connect(
- database = database,
- user = username,
- password = password,
- host = host,
+ database=database,
+ user=username,
+ password=password,
+ host=host,
auth_plugin=auth_plugin,
)
+
def search(query, params):
query_params = {'query': query}
query_to_run = query_str + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit)
@@ -46,6 +48,7 @@ def search(query, params):
return _fetch_results(cur)
+
def _fetch_results(cur):
results = []
for res in cur:
diff --git a/searx/engines/nyaa.py b/searx/engines/nyaa.py
index 4fe383efa..bdd3ea6dc 100644
--- a/searx/engines/nyaa.py
+++ b/searx/engines/nyaa.py
@@ -98,14 +98,18 @@ def response(resp):
content = 'Category: "{category}". Downloaded {downloads} times.'
content = content.format(category=category, downloads=downloads)
- results.append({'url': href,
- 'title': title,
- 'content': content,
- 'seed': seed,
- 'leech': leech,
- 'filesize': filesize,
- 'torrentfile': torrent_link,
- 'magnetlink': magnet_link,
- 'template': 'torrent.html'})
+ results.append(
+ {
+ 'url': href,
+ 'title': title,
+ 'content': content,
+ 'seed': seed,
+ 'leech': leech,
+ 'filesize': filesize,
+ 'torrentfile': torrent_link,
+ 'magnetlink': magnet_link,
+ 'template': 'torrent.html',
+ }
+ )
return results
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index c6211a004..946869834 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -151,10 +151,12 @@ def response(resp):
user_language = resp.search_params['language']
if resp.search_params['route']:
- results.append({
- 'answer': gettext('Get directions'),
- 'url': route_url.format(*resp.search_params['route'].groups()),
- })
+ results.append(
+ {
+ 'answer': gettext('Get directions'),
+ 'url': route_url.format(*resp.search_params['route'].groups()),
+ }
+ )
fetch_wikidata(nominatim_json, user_language)
@@ -170,26 +172,26 @@ def response(resp):
links, link_keys = get_links(result, user_language)
data = get_data(result, user_language, link_keys)
- results.append({
- 'template': 'map.html',
- 'title': title,
- 'address': address,
- 'address_label': get_key_label('addr', user_language),
- 'url': url,
- 'osm': osm,
- 'geojson': geojson,
- 'img_src': img_src,
- 'links': links,
- 'data': data,
- 'type': get_tag_label(
- result.get('category'), result.get('type', ''), user_language
- ),
- 'type_icon': result.get('icon'),
- 'content': '',
- 'longitude': result['lon'],
- 'latitude': result['lat'],
- 'boundingbox': result['boundingbox'],
- })
+ results.append(
+ {
+ 'template': 'map.html',
+ 'title': title,
+ 'address': address,
+ 'address_label': get_key_label('addr', user_language),
+ 'url': url,
+ 'osm': osm,
+ 'geojson': geojson,
+ 'img_src': img_src,
+ 'links': links,
+ 'data': data,
+ 'type': get_tag_label(result.get('category'), result.get('type', ''), user_language),
+ 'type_icon': result.get('icon'),
+ 'content': '',
+ 'longitude': result['lon'],
+ 'latitude': result['lat'],
+ 'boundingbox': result['boundingbox'],
+ }
+ )
return results
@@ -270,9 +272,9 @@ def get_title_address(result):
# https://github.com/osm-search/Nominatim/issues/1662
address_name = address_raw.get('address29')
else:
- address_name = address_raw.get(result['category'])
+ address_name = address_raw.get(result['category'])
elif result['type'] in address_raw:
- address_name = address_raw.get(result['type'])
+ address_name = address_raw.get(result['type'])
# add rest of adressdata, if something is already found
if address_name:
@@ -297,8 +299,7 @@ def get_title_address(result):
def get_url_osm_geojson(result):
- """Get url, osm and geojson
- """
+ """Get url, osm and geojson"""
osm_type = result.get('osm_type', result.get('type'))
if 'osm_id' not in result:
# see https://github.com/osm-search/Nominatim/issues/1521
@@ -349,11 +350,13 @@ def get_links(result, user_language):
url, url_label = mapping_function(raw_value)
if url.startswith('https://wikidata.org'):
url_label = result.get('wikidata', {}).get('itemLabel') or url_label
- links.append({
- 'label': get_key_label(k, user_language),
- 'url': url,
- 'url_label': url_label,
- })
+ links.append(
+ {
+ 'label': get_key_label(k, user_language),
+ 'url': url,
+ 'url_label': url_label,
+ }
+ )
link_keys.add(k)
return links, link_keys
@@ -373,11 +376,13 @@ def get_data(result, user_language, ignore_keys):
continue
k_label = get_key_label(k, user_language)
if k_label:
- data.append({
- 'label': k_label,
- 'key': k,
- 'value': v,
- })
+ data.append(
+ {
+ 'label': k_label,
+ 'key': k,
+ 'value': v,
+ }
+ )
data.sort(key=lambda entry: (get_key_rank(entry['key']), entry['label']))
return data
diff --git a/searx/engines/pdbe.py b/searx/engines/pdbe.py
index b9bbfaf1b..34c8d3227 100644
--- a/searx/engines/pdbe.py
+++ b/searx/engines/pdbe.py
@@ -34,10 +34,7 @@ def request(query, params):
params['url'] = pdbe_solr_url
params['method'] = 'POST'
- params['data'] = {
- 'q': query,
- 'wt': "json" # request response in parsable format
- }
+ params['data'] = {'q': query, 'wt': "json"} # request response in parsable format
return params
@@ -53,12 +50,21 @@ def construct_body(result):
if result['journal']:
content = content.format(
title=result['citation_title'],
- authors=result['entry_author_list'][0], journal=result['journal'], volume=result['journal_volume'],
- page=result['journal_page'], year=result['citation_year'])
+ authors=result['entry_author_list'][0],
+ journal=result['journal'],
+ volume=result['journal_volume'],
+ page=result['journal_page'],
+ year=result['citation_year'],
+ )
else:
content = content.format(
title=result['citation_title'],
- authors=result['entry_author_list'][0], journal='', volume='', page='', year=result['release_year'])
+ authors=result['entry_author_list'][0],
+ journal='',
+ volume='',
+ page='',
+ year=result['release_year'],
+ )
img_src = pdbe_preview_url.format(pdb_id=result['pdb_id'])
except (KeyError):
content = None
@@ -96,20 +102,21 @@ def response(resp):
# since we can't construct a proper body from the response, we'll make up our own
msg_superseded = gettext("This entry has been superseded by")
content = '{msg_superseded}: {url} ({pdb_id})'.format(
- msg_superseded=msg_superseded,
- url=superseded_url,
- pdb_id=result['superseded_by'])
+ msg_superseded=msg_superseded, url=superseded_url, pdb_id=result['superseded_by']
+ )
# obsoleted entries don't have preview images
img_src = None
else:
title, content, img_src = construct_body(result)
- results.append({
- 'url': pdbe_entry_url.format(pdb_id=result['pdb_id']),
- 'title': title,
- 'content': content,
- 'img_src': img_src
- })
+ results.append(
+ {
+ 'url': pdbe_entry_url.format(pdb_id=result['pdb_id']),
+ 'title': title,
+ 'content': content,
+ 'img_src': img_src,
+ }
+ )
return results
diff --git a/searx/engines/peertube.py b/searx/engines/peertube.py
index f9cd50be1..1ace14027 100644
--- a/searx/engines/peertube.py
+++ b/searx/engines/peertube.py
@@ -36,9 +36,7 @@ def request(query, params):
language = params["language"].split("-")[0]
if "all" != language and language in supported_languages:
query_dict["languageOneOf"] = language
- params["url"] = search_url.format(
- query=urlencode(query_dict), pageno=pageno
- )
+ params["url"] = search_url.format(query=urlencode(query_dict), pageno=pageno)
return params
diff --git a/searx/engines/photon.py b/searx/engines/photon.py
index f85dcad86..16ea88194 100644
--- a/searx/engines/photon.py
+++ b/searx/engines/photon.py
@@ -33,9 +33,7 @@ supported_languages = ['de', 'en', 'fr', 'it']
# do search-request
def request(query, params):
- params['url'] = base_url +\
- search_string.format(query=urlencode({'q': query}),
- limit=number_of_results)
+ params['url'] = base_url + search_string.format(query=urlencode({'q': query}), limit=number_of_results)
if params['language'] != 'all':
language = params['language'].split('_')[0]
@@ -75,59 +73,71 @@ def response(resp):
# continue if invalid osm-type
continue
- url = result_base_url.format(osm_type=osm_type,
- osm_id=properties.get('osm_id'))
+ url = result_base_url.format(osm_type=osm_type, osm_id=properties.get('osm_id'))
- osm = {'type': osm_type,
- 'id': properties.get('osm_id')}
+ osm = {'type': osm_type, 'id': properties.get('osm_id')}
geojson = r.get('geometry')
if properties.get('extent'):
- boundingbox = [properties.get('extent')[3],
- properties.get('extent')[1],
- properties.get('extent')[0],
- properties.get('extent')[2]]
+ boundingbox = [
+ properties.get('extent')[3],
+ properties.get('extent')[1],
+ properties.get('extent')[0],
+ properties.get('extent')[2],
+ ]
else:
# TODO: better boundingbox calculation
- boundingbox = [geojson['coordinates'][1],
- geojson['coordinates'][1],
- geojson['coordinates'][0],
- geojson['coordinates'][0]]
+ boundingbox = [
+ geojson['coordinates'][1],
+ geojson['coordinates'][1],
+ geojson['coordinates'][0],
+ geojson['coordinates'][0],
+ ]
# address calculation
address = {}
# get name
- if properties.get('osm_key') == 'amenity' or\
- properties.get('osm_key') == 'shop' or\
- properties.get('osm_key') == 'tourism' or\
- properties.get('osm_key') == 'leisure':
+ if (
+ properties.get('osm_key') == 'amenity'
+ or properties.get('osm_key') == 'shop'
+ or properties.get('osm_key') == 'tourism'
+ or properties.get('osm_key') == 'leisure'
+ ):
address = {'name': properties.get('name')}
# add rest of address data, if something is already found
if address.get('name'):
- address.update({'house_number': properties.get('housenumber'),
- 'road': properties.get('street'),
- 'locality': properties.get('city',
- properties.get('town', # noqa
- properties.get('village'))), # noqa
- 'postcode': properties.get('postcode'),
- 'country': properties.get('country')})
+ address.update(
+ {
+ 'house_number': properties.get('housenumber'),
+ 'road': properties.get('street'),
+ 'locality': properties.get(
+ 'city', properties.get('town', properties.get('village')) # noqa
+ ), # noqa
+ 'postcode': properties.get('postcode'),
+ 'country': properties.get('country'),
+ }
+ )
else:
address = None
# append result
- results.append({'template': 'map.html',
- 'title': title,
- 'content': '',
- 'longitude': geojson['coordinates'][0],
- 'latitude': geojson['coordinates'][1],
- 'boundingbox': boundingbox,
- 'geojson': geojson,
- 'address': address,
- 'osm': osm,
- 'url': url})
+ results.append(
+ {
+ 'template': 'map.html',
+ 'title': title,
+ 'content': '',
+ 'longitude': geojson['coordinates'][0],
+ 'latitude': geojson['coordinates'][1],
+ 'boundingbox': boundingbox,
+ 'geojson': geojson,
+ 'address': address,
+ 'osm': osm,
+ 'url': url,
+ }
+ )
# return results
return results
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
index d4b94ecfa..4b0984be5 100644
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -40,17 +40,14 @@ trackers = [
]
# piratebay specific type-definitions
-search_types = {"files": "0",
- "music": "100",
- "videos": "200"}
+search_types = {"files": "0", "music": "100", "videos": "200"}
# do search-request
def request(query, params):
search_type = search_types.get(params["category"], "0")
- params["url"] = search_url.format(search_term=quote(query),
- search_type=search_type)
+ params["url"] = search_url.format(search_term=quote(query), search_type=search_type)
return params
@@ -68,8 +65,9 @@ def response(resp):
# parse results
for result in search_res:
link = url + "description.php?id=" + result["id"]
- magnetlink = "magnet:?xt=urn:btih:" + result["info_hash"] + "&dn=" + result["name"]\
- + "&tr=" + "&tr=".join(trackers)
+ magnetlink = (
+ "magnet:?xt=urn:btih:" + result["info_hash"] + "&dn=" + result["name"] + "&tr=" + "&tr=".join(trackers)
+ )
params = {
"url": link,
@@ -77,7 +75,7 @@ def response(resp):
"seed": result["seeders"],
"leech": result["leechers"],
"magnetlink": magnetlink,
- "template": "torrent.html"
+ "template": "torrent.html",
}
# extract and convert creation date
diff --git a/searx/engines/postgresql.py b/searx/engines/postgresql.py
index 1eddcd519..d8bbabe27 100644
--- a/searx/engines/postgresql.py
+++ b/searx/engines/postgresql.py
@@ -20,6 +20,7 @@ paging = True
result_template = 'key-value.html'
_connection = None
+
def init(engine_settings):
global _connection # pylint: disable=global-statement
@@ -30,25 +31,24 @@ def init(engine_settings):
raise ValueError('only SELECT query is supported')
_connection = psycopg2.connect(
- database = database,
- user = username,
- password = password,
- host = host,
- port = port,
+ database=database,
+ user=username,
+ password=password,
+ host=host,
+ port=port,
)
+
def search(query, params):
query_params = {'query': query}
- query_to_run = (
- query_str
- + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit)
- )
+ query_to_run = query_str + ' LIMIT {0} OFFSET {1}'.format(limit, (params['pageno'] - 1) * limit)
with _connection:
with _connection.cursor() as cur:
cur.execute(query_to_run, query_params)
return _fetch_results(cur)
+
def _fetch_results(cur):
results = []
titles = []
diff --git a/searx/engines/pubmed.py b/searx/engines/pubmed.py
index 5d88d398e..27444ae24 100644
--- a/searx/engines/pubmed.py
+++ b/searx/engines/pubmed.py
@@ -15,7 +15,7 @@ about = {
"wikidata_id": 'Q1540899',
"official_api_documentation": {
'url': 'https://www.ncbi.nlm.nih.gov/home/develop/api/',
- 'comment': 'More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/'
+ 'comment': 'More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/',
},
"use_official_api": True,
"require_api_key": False,
@@ -24,8 +24,9 @@ about = {
categories = ['science']
-base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\
- + '?db=pubmed&{query}&retstart={offset}&retmax={hits}'
+base_url = (
+ 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi' + '?db=pubmed&{query}&retstart={offset}&retmax={hits}'
+)
# engine dependent config
number_of_results = 10
@@ -36,9 +37,7 @@ def request(query, params):
# basic search
offset = (params['pageno'] - 1) * number_of_results
- string_args = dict(query=urlencode({'term': query}),
- offset=offset,
- hits=number_of_results)
+ string_args = dict(query=urlencode({'term': query}), offset=offset, hits=number_of_results)
params['url'] = base_url.format(**string_args)
@@ -49,8 +48,9 @@ def response(resp):
results = []
# First retrieve notice of each result
- pubmed_retrieve_api_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'\
- + 'db=pubmed&retmode=xml&id={pmids_string}'
+ pubmed_retrieve_api_url = (
+ 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?' + 'db=pubmed&retmode=xml&id={pmids_string}'
+ )
pmids_results = etree.XML(resp.content)
pmids = pmids_results.xpath('//eSearchResult/IdList/Id')
@@ -88,14 +88,17 @@ def response(resp):
content = content[0:300] + "..."
# TODO: center snippet on query term
- res_dict = {'url': url,
- 'title': title,
- 'content': content}
+ res_dict = {'url': url, 'title': title, 'content': content}
try:
- publishedDate = datetime.strptime(entry.xpath('.//DateCreated/Year')[0].text
- + '-' + entry.xpath('.//DateCreated/Month')[0].text
- + '-' + entry.xpath('.//DateCreated/Day')[0].text, '%Y-%m-%d')
+ publishedDate = datetime.strptime(
+ entry.xpath('.//DateCreated/Year')[0].text
+ + '-'
+ + entry.xpath('.//DateCreated/Month')[0].text
+ + '-'
+ + entry.xpath('.//DateCreated/Day')[0].text,
+ '%Y-%m-%d',
+ )
res_dict['publishedDate'] = publishedDate
except:
pass
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index 0312e518c..a1799491a 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -61,6 +61,7 @@ category_to_keyword = {
# search-url
url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={offset}'
+
def request(query, params):
"""Qwant search request"""
keyword = category_to_keyword[categories[0]]
@@ -77,10 +78,10 @@ def request(query, params):
offset = min(offset, 40)
params['url'] = url.format(
- keyword = keyword,
- query = urlencode({'q': query}),
- offset = offset,
- count = count,
+ keyword=keyword,
+ query=urlencode({'q': query}),
+ offset=offset,
+ count=count,
)
# add language tag
@@ -111,7 +112,14 @@ def response(resp):
# check for an API error
if search_results.get('status') != 'success':
- msg = ",".join(data.get('message', ['unknown', ]))
+ msg = ",".join(
+ data.get(
+ 'message',
+ [
+ 'unknown',
+ ],
+ )
+ )
raise SearxEngineAPIException('API error::' + msg)
# raise for other errors
@@ -128,7 +136,7 @@ def response(resp):
# result['items'].
mainline = data.get('result', {}).get('items', [])
mainline = [
- {'type' : keyword, 'items' : mainline },
+ {'type': keyword, 'items': mainline},
]
# return empty array if there are no results
@@ -153,11 +161,13 @@ def response(resp):
if mainline_type == 'web':
content = item['desc']
- results.append({
- 'title': title,
- 'url': res_url,
- 'content': content,
- })
+ results.append(
+ {
+ 'title': title,
+ 'url': res_url,
+ 'content': content,
+ }
+ )
elif mainline_type == 'news':
@@ -168,23 +178,27 @@ def response(resp):
img_src = None
if news_media:
img_src = news_media[0].get('pict', {}).get('url', None)
- results.append({
- 'title': title,
- 'url': res_url,
- 'publishedDate': pub_date,
- 'img_src': img_src,
- })
+ results.append(
+ {
+ 'title': title,
+ 'url': res_url,
+ 'publishedDate': pub_date,
+ 'img_src': img_src,
+ }
+ )
elif mainline_type == 'images':
thumbnail = item['thumbnail']
img_src = item['media']
- results.append({
- 'title': title,
- 'url': res_url,
- 'template': 'images.html',
- 'thumbnail_src': thumbnail,
- 'img_src': img_src,
- })
+ results.append(
+ {
+ 'title': title,
+ 'url': res_url,
+ 'template': 'images.html',
+ 'thumbnail_src': thumbnail,
+ 'img_src': img_src,
+ }
+ )
elif mainline_type == 'videos':
# some videos do not have a description: while qwant-video
@@ -208,19 +222,18 @@ def response(resp):
thumbnail = item['thumbnail']
# from some locations (DE and others?) the s2 link does
# respond with a 'Please wait ..' but does not deliver the thumbnail
- thumbnail = thumbnail.replace(
- 'https://s2.qwant.com',
- 'https://s1.qwant.com', 1
+ thumbnail = thumbnail.replace('https://s2.qwant.com', 'https://s1.qwant.com', 1)
+ results.append(
+ {
+ 'title': title,
+ 'url': res_url,
+ 'content': content,
+ 'publishedDate': pub_date,
+ 'thumbnail': thumbnail,
+ 'template': 'videos.html',
+ 'length': length,
+ }
)
- results.append({
- 'title': title,
- 'url': res_url,
- 'content': content,
- 'publishedDate': pub_date,
- 'thumbnail': thumbnail,
- 'template': 'videos.html',
- 'length': length,
- })
return results
@@ -229,8 +242,8 @@ def response(resp):
def _fetch_supported_languages(resp):
# list of regions is embedded in page as a js object
response_text = resp.text
- response_text = response_text[response_text.find('INITIAL_PROPS'):]
- response_text = response_text[response_text.find('{'):response_text.find('</script>')]
+ response_text = response_text[response_text.find('INITIAL_PROPS') :]
+ response_text = response_text[response_text.find('{') : response_text.find('</script>')]
regions_json = loads(response_text)
diff --git a/searx/engines/recoll.py b/searx/engines/recoll.py
index 42f2858d7..ebcd83b8d 100644
--- a/searx/engines/recoll.py
+++ b/searx/engines/recoll.py
@@ -28,18 +28,12 @@ mount_prefix = None
dl_prefix = None
# embedded
-embedded_url = '<{ttype} controls height="166px" ' +\
- 'src="{url}" type="{mtype}"></{ttype}>'
+embedded_url = '<{ttype} controls height="166px" ' + 'src="{url}" type="{mtype}"></{ttype}>'
# helper functions
def get_time_range(time_range):
- sw = {
- 'day': 1,
- 'week': 7,
- 'month': 30,
- 'year': 365
- }
+ sw = {'day': 1, 'week': 7, 'month': 30, 'year': 365}
offset = sw.get(time_range, 0)
if not offset:
@@ -52,11 +46,9 @@ def get_time_range(time_range):
def request(query, params):
search_after = get_time_range(params['time_range'])
search_url = base_url + 'json?{query}&highlight=0'
- params['url'] = search_url.format(query=urlencode({
- 'query': query,
- 'page': params['pageno'],
- 'after': search_after,
- 'dir': search_dir}))
+ params['url'] = search_url.format(
+ query=urlencode({'query': query, 'page': params['pageno'], 'after': search_after, 'dir': search_dir})
+ )
return params
@@ -76,10 +68,7 @@ def response(resp):
content = '{}'.format(result['snippet'])
# append result
- item = {'url': url,
- 'title': title,
- 'content': content,
- 'template': 'files.html'}
+ item = {'url': url, 'title': title, 'content': content, 'template': 'files.html'}
if result['size']:
item['size'] = int(result['size'])
@@ -96,9 +85,8 @@ def response(resp):
if mtype in ['audio', 'video']:
item['embedded'] = embedded_url.format(
- ttype=mtype,
- url=quote(url.encode('utf8'), '/:'),
- mtype=result['mtype'])
+ ttype=mtype, url=quote(url.encode('utf8'), '/:'), mtype=result['mtype']
+ )
if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']:
item['img_src'] = url
diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
index ca6cb28a8..36d92339d 100644
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -52,10 +52,7 @@ def response(resp):
data = post['data']
# extract post information
- params = {
- 'url': urljoin(base_url, data['permalink']),
- 'title': data['title']
- }
+ params = {'url': urljoin(base_url, data['permalink']), 'title': data['title']}
# if thumbnail field contains a valid URL, we need to change template
thumbnail = data['thumbnail']
diff --git a/searx/engines/redis_server.py b/searx/engines/redis_server.py
index f9726033d..03786f81d 100644
--- a/searx/engines/redis_server.py
+++ b/searx/engines/redis_server.py
@@ -20,16 +20,19 @@ result_template = 'key-value.html'
exact_match_only = True
_redis_client = None
+
+
def init(_engine_settings):
global _redis_client # pylint: disable=global-statement
_redis_client = redis.StrictRedis(
- host = host,
- port = port,
- db = db,
- password = password or None,
- decode_responses = True,
+ host=host,
+ port=port,
+ db=db,
+ password=password or None,
+ decode_responses=True,
)
+
def search(query, _params):
if not exact_match_only:
return search_keys(query)
@@ -42,21 +45,20 @@ def search(query, _params):
if ' ' in query:
qset, rest = query.split(' ', 1)
ret = []
- for res in _redis_client.hscan_iter(
- qset, match='*{}*'.format(rest)
- ):
- ret.append({
- res[0]: res[1],
- 'template': result_template,
- })
+ for res in _redis_client.hscan_iter(qset, match='*{}*'.format(rest)):
+ ret.append(
+ {
+ res[0]: res[1],
+ 'template': result_template,
+ }
+ )
return ret
return []
+
def search_keys(query):
ret = []
- for key in _redis_client.scan_iter(
- match='*{}*'.format(query)
- ):
+ for key in _redis_client.scan_iter(match='*{}*'.format(query)):
key_type = _redis_client.type(key)
res = None
diff --git a/searx/engines/rumble.py b/searx/engines/rumble.py
index 407142467..beca2570c 100644
--- a/searx/engines/rumble.py
+++ b/searx/engines/rumble.py
@@ -68,14 +68,16 @@ def response(resp):
else:
content = f"{views} views - {rumbles} rumbles"
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'author': author,
- 'length': length,
- 'template': 'videos.html',
- 'publishedDate': fixed_date,
- 'thumbnail': thumbnail,
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'author': author,
+ 'length': length,
+ 'template': 'videos.html',
+ 'publishedDate': fixed_date,
+ 'thumbnail': thumbnail,
+ }
+ )
return results
diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py
index 51c925247..ad27079dd 100644
--- a/searx/engines/scanr_structures.py
+++ b/searx/engines/scanr_structures.py
@@ -32,12 +32,16 @@ def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['Content-type'] = "application/json"
- params['data'] = dumps({"query": query,
- "searchField": "ALL",
- "sortDirection": "ASC",
- "sortOrder": "RELEVANCY",
- "page": params['pageno'],
- "pageSize": page_size})
+ params['data'] = dumps(
+ {
+ "query": query,
+ "searchField": "ALL",
+ "sortDirection": "ASC",
+ "sortOrder": "RELEVANCY",
+ "page": params['pageno'],
+ "pageSize": page_size,
+ }
+ )
return params
@@ -69,11 +73,15 @@ def response(resp):
content = result['highlights'][0]['value']
# append result
- results.append({'url': url + 'structure/' + result['id'],
- 'title': result['label'],
- # 'thumbnail': thumbnail,
- 'img_src': thumbnail,
- 'content': html_to_text(content)})
+ results.append(
+ {
+ 'url': url + 'structure/' + result['id'],
+ 'title': result['label'],
+ # 'thumbnail': thumbnail,
+ 'img_src': thumbnail,
+ 'content': html_to_text(content),
+ }
+ )
# return results
return results
diff --git a/searx/engines/searchcode_code.py b/searx/engines/searchcode_code.py
index 8c1330d98..a4b0308f9 100644
--- a/searx/engines/searchcode_code.py
+++ b/searx/engines/searchcode_code.py
@@ -25,10 +25,7 @@ url = 'https://searchcode.com/'
search_url = url + 'api/codesearch_I/?{query}&p={pageno}'
# special code-endings which are not recognised by the file ending
-code_endings = {'cs': 'c#',
- 'h': 'c',
- 'hpp': 'cpp',
- 'cxx': 'cpp'}
+code_endings = {'cs': 'c#', 'h': 'c', 'hpp': 'cpp', 'cxx': 'cpp'}
# do search-request
@@ -55,17 +52,21 @@ def response(resp):
lines[int(line)] = code
code_language = code_endings.get(
- result['filename'].split('.')[-1].lower(),
- result['filename'].split('.')[-1].lower())
+ result['filename'].split('.')[-1].lower(), result['filename'].split('.')[-1].lower()
+ )
# append result
- results.append({'url': href,
- 'title': title,
- 'content': '',
- 'repository': repo,
- 'codelines': sorted(lines.items()),
- 'code_language': code_language,
- 'template': 'code.html'})
+ results.append(
+ {
+ 'url': href,
+ 'title': title,
+ 'content': '',
+ 'repository': repo,
+ 'codelines': sorted(lines.items()),
+ 'code_language': code_language,
+ 'template': 'code.html',
+ }
+ )
# return results
return results
diff --git a/searx/engines/searx_engine.py b/searx/engines/searx_engine.py
index 98ef0fb79..3e9035d6f 100644
--- a/searx/engines/searx_engine.py
+++ b/searx/engines/searx_engine.py
@@ -37,7 +37,7 @@ def request(query, params):
'language': params['language'],
'time_range': params['time_range'],
'category': params['category'],
- 'format': 'json'
+ 'format': 'json',
}
return params
diff --git a/searx/engines/semantic_scholar.py b/searx/engines/semantic_scholar.py
index 297d0cf71..5d9d1a8e9 100644
--- a/searx/engines/semantic_scholar.py
+++ b/searx/engines/semantic_scholar.py
@@ -13,19 +13,21 @@ def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['content-type'] = 'application/json'
- params['data'] = dumps({
- "queryString": query,
- "page": params['pageno'],
- "pageSize": 10,
- "sort": "relevance",
- "useFallbackRankerService": False,
- "useFallbackSearchCluster": False,
- "getQuerySuggestions": False,
- "authors": [],
- "coAuthors": [],
- "venues": [],
- "performTitleMatch": True,
- })
+ params['data'] = dumps(
+ {
+ "queryString": query,
+ "page": params['pageno'],
+ "pageSize": 10,
+ "sort": "relevance",
+ "useFallbackRankerService": False,
+ "useFallbackSearchCluster": False,
+ "getQuerySuggestions": False,
+ "authors": [],
+ "coAuthors": [],
+ "venues": [],
+ "performTitleMatch": True,
+ }
+ )
return params
@@ -33,10 +35,12 @@ def response(resp):
res = loads(resp.text)
results = []
for result in res['results']:
- results.append({
- 'url': result['primaryPaperLink']['url'],
- 'title': result['title']['text'],
- 'content': result['paperAbstractTruncated']
- })
+ results.append(
+ {
+ 'url': result['primaryPaperLink']['url'],
+ 'title': result['title']['text'],
+ 'content': result['paperAbstractTruncated'],
+ }
+ )
return results
diff --git a/searx/engines/sepiasearch.py b/searx/engines/sepiasearch.py
index ebad20d01..00b1b3672 100644
--- a/searx/engines/sepiasearch.py
+++ b/searx/engines/sepiasearch.py
@@ -31,17 +31,13 @@ supported_languages = [
]
base_url = 'https://sepiasearch.org/api/v1/search/videos'
-safesearch_table = {
- 0: 'both',
- 1: 'false',
- 2: 'false'
-}
+safesearch_table = {0: 'both', 1: 'false', 2: 'false'}
time_range_table = {
'day': relativedelta.relativedelta(),
'week': relativedelta.relativedelta(weeks=-1),
'month': relativedelta.relativedelta(months=-1),
- 'year': relativedelta.relativedelta(years=-1)
+ 'year': relativedelta.relativedelta(years=-1),
}
@@ -55,13 +51,19 @@ def minute_to_hm(minute):
def request(query, params):
- params['url'] = base_url + '?' + urlencode({
- 'search': query,
- 'start': (params['pageno'] - 1) * 10,
- 'count': 10,
- 'sort': '-match',
- 'nsfw': safesearch_table[params['safesearch']]
- })
+ params['url'] = (
+ base_url
+ + '?'
+ + urlencode(
+ {
+ 'search': query,
+ 'start': (params['pageno'] - 1) * 10,
+ 'count': 10,
+ 'sort': '-match',
+ 'nsfw': safesearch_table[params['safesearch']],
+ }
+ )
+ )
language = params['language'].split('-')[0]
if language in supported_languages:
@@ -91,14 +93,18 @@ def response(resp):
length = minute_to_hm(result.get('duration'))
url = result['url']
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'author': author,
- 'length': length,
- 'template': 'videos.html',
- 'publishedDate': publishedDate,
- 'embedded': embedded,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'author': author,
+ 'length': length,
+ 'template': 'videos.html',
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
+ 'thumbnail': thumbnail,
+ }
+ )
return results
diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
index 85cb25b7f..2e95b4769 100644
--- a/searx/engines/seznam.py
+++ b/searx/engines/seznam.py
@@ -58,10 +58,12 @@ def response(resp):
if result_data is None:
continue
title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
- results.append({
- 'url': title_element.get('href'),
- 'title': extract_text(title_element),
- 'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
- })
+ results.append(
+ {
+ 'url': title_element.get('href'),
+ 'title': extract_text(title_element),
+ 'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
+ }
+ )
return results
diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py
index 884fddd2d..ad498b847 100644
--- a/searx/engines/sjp.py
+++ b/searx/engines/sjp.py
@@ -28,9 +28,11 @@ URL = 'https://sjp.pwn.pl'
SEARCH_URL = URL + '/szukaj/{query}.html'
word_xpath = '//div[@class="query"]'
-dict_xpath = ['//div[@class="wyniki sjp-so-wyniki sjp-so-anchor"]',
- '//div[@class="wyniki sjp-wyniki sjp-anchor"]',
- '//div[@class="wyniki sjp-doroszewski-wyniki sjp-doroszewski-anchor"]']
+dict_xpath = [
+ '//div[@class="wyniki sjp-so-wyniki sjp-so-anchor"]',
+ '//div[@class="wyniki sjp-wyniki sjp-anchor"]',
+ '//div[@class="wyniki sjp-doroszewski-wyniki sjp-doroszewski-anchor"]',
+]
def request(query, params):
@@ -85,9 +87,11 @@ def response(resp):
infobox += "</ol>"
infobox += "</ul></div>"
- results.append({
- 'infobox': word,
- 'content': infobox,
- })
+ results.append(
+ {
+ 'infobox': word,
+ 'content': infobox,
+ }
+ )
return results
diff --git a/searx/engines/solidtorrents.py b/searx/engines/solidtorrents.py
index 7fbef9190..614b38277 100644
--- a/searx/engines/solidtorrents.py
+++ b/searx/engines/solidtorrents.py
@@ -36,14 +36,16 @@ def response(resp):
search_results = loads(resp.text)
for result in search_results["results"]:
- results.append({
- 'infohash': result["infohash"],
- 'seed': result["swarm"]["seeders"],
- 'leech': result["swarm"]["leechers"],
- 'title': result["title"],
- 'url': "https://solidtorrents.net/view/" + result["_id"],
- 'filesize': result["size"],
- 'magnetlink': result["magnet"],
- 'template': "torrent.html",
- })
+ results.append(
+ {
+ 'infohash': result["infohash"],
+ 'seed': result["swarm"]["seeders"],
+ 'leech': result["swarm"]["leechers"],
+ 'title': result["title"],
+ 'url': "https://solidtorrents.net/view/" + result["_id"],
+ 'filesize': result["size"],
+ 'magnetlink': result["magnet"],
+ 'template': "torrent.html",
+ }
+ )
return results
diff --git a/searx/engines/solr.py b/searx/engines/solr.py
index e26f19442..3e7846f8e 100644
--- a/searx/engines/solr.py
+++ b/searx/engines/solr.py
@@ -14,10 +14,10 @@ from searx.exceptions import SearxEngineAPIException
base_url = 'http://localhost:8983'
collection = ''
rows = 10
-sort = '' # sorting: asc or desc
-field_list = 'name' # list of field names to display on the UI
-default_fields = '' # default field to query
-query_fields = '' # query fields
+sort = '' # sorting: asc or desc
+field_list = 'name' # list of field names to display on the UI
+default_fields = '' # default field to query
+query_fields = '' # query fields
_search_url = ''
paging = True
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index d5bfc0f6f..004164e37 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -27,17 +27,21 @@ paging = True
# search-url
# missing attribute: user_id, app_version, app_locale
url = 'https://api-v2.soundcloud.com/'
-search_url = url + 'search?{query}'\
- '&variant_ids='\
- '&facet=model'\
- '&limit=20'\
- '&offset={offset}'\
- '&linked_partitioning=1'\
- '&client_id={client_id}' # noqa
-
-embedded_url = '<iframe width="100%" height="166" ' +\
- 'scrolling="no" frameborder="no" ' +\
- 'data-src="https://w.soundcloud.com/player/?url={uri}"></iframe>'
+search_url = (
+ url + 'search?{query}'
+ '&variant_ids='
+ '&facet=model'
+ '&limit=20'
+ '&offset={offset}'
+ '&linked_partitioning=1'
+ '&client_id={client_id}'
+) # noqa
+
+embedded_url = (
+ '<iframe width="100%" height="166" '
+ + 'scrolling="no" frameborder="no" '
+ + 'data-src="https://w.soundcloud.com/player/?url={uri}"></iframe>'
+)
cid_re = re.compile(r'client_id:"([^"]*)"', re.I | re.U)
guest_client_id = ''
@@ -75,9 +79,7 @@ def init(engine_settings=None):
def request(query, params):
offset = (params['pageno'] - 1) * 20
- params['url'] = search_url.format(query=urlencode({'q': query}),
- offset=offset,
- client_id=guest_client_id)
+ params['url'] = search_url.format(query=urlencode({'q': query}), offset=offset, client_id=guest_client_id)
return params
@@ -98,11 +100,15 @@ def response(resp):
embedded = embedded_url.format(uri=uri)
# append result
- results.append({'url': result['permalink_url'],
- 'title': title,
- 'publishedDate': publishedDate,
- 'embedded': embedded,
- 'content': content})
+ results.append(
+ {
+ 'url': result['permalink_url'],
+ 'title': title,
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
+ 'content': content,
+ }
+ )
# return results
return results
diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py
index 6816fe672..15517e3eb 100644
--- a/searx/engines/spotify.py
+++ b/searx/engines/spotify.py
@@ -42,9 +42,10 @@ def request(query, params):
r = http_post(
'https://accounts.spotify.com/api/token',
data={'grant_type': 'client_credentials'},
- headers={'Authorization': 'Basic ' + base64.b64encode(
- "{}:{}".format(api_client_id, api_client_secret).encode()
- ).decode()}
+ headers={
+ 'Authorization': 'Basic '
+ + base64.b64encode("{}:{}".format(api_client_id, api_client_secret).encode()).decode()
+ },
)
j = loads(r.text)
params['headers'] = {'Authorization': 'Bearer {}'.format(j.get('access_token'))}
@@ -63,18 +64,12 @@ def response(resp):
if result['type'] == 'track':
title = result['name']
url = result['external_urls']['spotify']
- content = '{} - {} - {}'.format(
- result['artists'][0]['name'],
- result['album']['name'],
- result['name'])
+ content = '{} - {} - {}'.format(result['artists'][0]['name'], result['album']['name'], result['name'])
embedded = embedded_url.format(audioid=result['id'])
# append result
- results.append({'url': url,
- 'title': title,
- 'embedded': embedded,
- 'content': content})
+ results.append({'url': url, 'title': title, 'embedded': embedded, 'content': content})
# return results
return results
diff --git a/searx/engines/springer.py b/searx/engines/springer.py
index 246e59b44..512d71e5e 100644
--- a/searx/engines/springer.py
+++ b/searx/engines/springer.py
@@ -26,15 +26,11 @@ api_key = 'unset'
base_url = 'https://api.springernature.com/metadata/json?'
+
def request(query, params):
if api_key == 'unset':
raise SearxEngineAPIException('missing Springer-Nature API key')
- args = urlencode({
- 'q' : query,
- 's' : nb_per_page * (params['pageno'] - 1),
- 'p' : nb_per_page,
- 'api_key' : api_key
- })
+ args = urlencode({'q': query, 's': nb_per_page * (params['pageno'] - 1), 'p': nb_per_page, 'api_key': api_key})
params['url'] = base_url + args
logger.debug("query_url --> %s", params['url'])
return params
@@ -50,21 +46,27 @@ def response(resp):
content += "..."
published = datetime.strptime(record['publicationDate'], '%Y-%m-%d')
- metadata = [record[x] for x in [
- 'publicationName',
- 'identifier',
- 'contentType',
- ] if record.get(x) is not None]
+ metadata = [
+ record[x]
+ for x in [
+ 'publicationName',
+ 'identifier',
+ 'contentType',
+ ]
+ if record.get(x) is not None
+ ]
metadata = ' / '.join(metadata)
if record.get('startingPage') and record.get('endingPage') is not None:
metadata += " (%(startingPage)s-%(endingPage)s)" % record
- results.append({
- 'title': record['title'],
- 'url': record['url'][0]['value'].replace('http://', 'https://', 1),
- 'content' : content,
- 'publishedDate' : published,
- 'metadata' : metadata
- })
+ results.append(
+ {
+ 'title': record['title'],
+ 'url': record['url'][0]['value'].replace('http://', 'https://', 1),
+ 'content': content,
+ 'publishedDate': published,
+ 'metadata': metadata,
+ }
+ )
return results
diff --git a/searx/engines/sqlite.py b/searx/engines/sqlite.py
index 43a85efbb..6de12f5fe 100644
--- a/searx/engines/sqlite.py
+++ b/searx/engines/sqlite.py
@@ -47,9 +47,9 @@ def search(query, params):
query_params = {
'query': query,
- 'wildcard': r'%' + query.replace(' ', r'%') + r'%',
+ 'wildcard': r'%' + query.replace(' ', r'%') + r'%',
'limit': limit,
- 'offset': (params['pageno'] - 1) * limit
+ 'offset': (params['pageno'] - 1) * limit,
}
query_to_run = query_str + ' LIMIT :limit OFFSET :offset'
@@ -59,7 +59,7 @@ def search(query, params):
col_names = [cn[0] for cn in cur.description]
for row in cur.fetchall():
- item = dict( zip(col_names, map(str, row)) )
+ item = dict(zip(col_names, map(str, row)))
item['template'] = result_template
logger.debug("append result --> %s", item)
results.append(item)
diff --git a/searx/engines/stackexchange.py b/searx/engines/stackexchange.py
index 34cba687c..99615b1a7 100644
--- a/searx/engines/stackexchange.py
+++ b/searx/engines/stackexchange.py
@@ -23,26 +23,30 @@ paging = True
pagesize = 10
api_site = 'stackoverflow'
-api_sort= 'activity'
+api_sort = 'activity'
api_order = 'desc'
# https://api.stackexchange.com/docs/advanced-search
search_api = 'https://api.stackexchange.com/2.3/search/advanced?'
+
def request(query, params):
- args = urlencode({
- 'q' : query,
- 'page' : params['pageno'],
- 'pagesize' : pagesize,
- 'site' : api_site,
- 'sort' : api_sort,
- 'order': 'desc',
- })
+ args = urlencode(
+ {
+ 'q': query,
+ 'page': params['pageno'],
+ 'pagesize': pagesize,
+ 'site': api_site,
+ 'sort': api_sort,
+ 'order': 'desc',
+ }
+ )
params['url'] = search_api + args
return params
+
def response(resp):
results = []
@@ -56,10 +60,12 @@ def response(resp):
content += ' // is answered'
content += " // score: %s" % result['score']
- results.append({
- 'url': "https://%s.com/q/%s" % (api_site, result['question_id']),
- 'title': html.unescape(result['title']),
- 'content': html.unescape(content),
- })
+ results.append(
+ {
+ 'url': "https://%s.com/q/%s" % (api_site, result['question_id']),
+ 'title': html.unescape(result['title']),
+ 'content': html.unescape(content),
+ }
+ )
return results
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index e71310be6..65d90debe 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -101,7 +101,7 @@ def response(resp):
# check if search result starts with something like: "2 Sep 2014 ... "
if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
date_pos = content.find('...') + 4
- date_string = content[0:date_pos - 5]
+ date_string = content[0 : date_pos - 5]
# fix content string
content = content[date_pos:]
@@ -113,7 +113,7 @@ def response(resp):
# check if search result starts with something like: "5 days ago ... "
elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
date_pos = content.find('...') + 4
- date_string = content[0:date_pos - 5]
+ date_string = content[0 : date_pos - 5]
# calculate datetime
published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))
@@ -123,15 +123,10 @@ def response(resp):
if published_date:
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'publishedDate': published_date})
+ results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
else:
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content})
+ results.append({'url': url, 'title': title, 'content': content})
# return results
return results
@@ -152,7 +147,7 @@ def _fetch_supported_languages(resp):
'malayam': 'ml',
'norsk': 'nb',
'sinhalese': 'si',
- 'sudanese': 'su'
+ 'sudanese': 'su',
}
# get the English name of every language known by babel
diff --git a/searx/engines/tokyotoshokan.py b/searx/engines/tokyotoshokan.py
index 0d62453a9..b01de38c1 100644
--- a/searx/engines/tokyotoshokan.py
+++ b/searx/engines/tokyotoshokan.py
@@ -56,11 +56,7 @@ def response(resp):
name_row = rows[i]
links = name_row.xpath('./td[@class="desc-top"]/a')
- params = {
- 'template': 'torrent.html',
- 'url': links[-1].attrib.get('href'),
- 'title': extract_text(links[-1])
- }
+ params = {'template': 'torrent.html', 'url': links[-1].attrib.get('href'), 'title': extract_text(links[-1])}
# I have not yet seen any torrents without magnet links, but
# it's better to be prepared to stumble upon one some day
if len(links) == 2:
diff --git a/searx/engines/torznab.py b/searx/engines/torznab.py
index 960d1ee90..a48017c13 100644
--- a/searx/engines/torznab.py
+++ b/searx/engines/torznab.py
@@ -35,10 +35,12 @@ api_key = ''
# https://newznab.readthedocs.io/en/latest/misc/api/#predefined-categories
torznab_categories = []
-def init(engine_settings=None): # pylint: disable=unused-argument
+
+def init(engine_settings=None): # pylint: disable=unused-argument
if len(base_url) < 1:
raise ValueError('missing torznab base_url')
+
def request(query, params):
search_url = base_url + '?t=search&q={search_query}'
@@ -48,13 +50,12 @@ def request(query, params):
search_url += '&cat={torznab_categories}'
params['url'] = search_url.format(
- search_query = quote(query),
- api_key = api_key,
- torznab_categories = ",".join([str(x) for x in torznab_categories])
+ search_query=quote(query), api_key=api_key, torznab_categories=",".join([str(x) for x in torznab_categories])
)
return params
+
def response(resp):
results = []
@@ -103,8 +104,7 @@ def response(resp):
result["publishedDate"] = None
try:
- result["publishedDate"] = datetime.strptime(
- get_property(item, 'pubDate'), '%a, %d %b %Y %H:%M:%S %z')
+ result["publishedDate"] = datetime.strptime(get_property(item, 'pubDate'), '%a, %d %b %Y %H:%M:%S %z')
except (ValueError, TypeError) as e:
logger.debug("ignore exception (publishedDate): %s", e)
@@ -134,9 +134,7 @@ def get_property(item, property_name):
def get_torznab_attr(item, attr_name):
element = item.find(
'.//torznab:attr[@name="{attr_name}"]'.format(attr_name=attr_name),
- {
- 'torznab': 'http://torznab.com/schemas/2015/feed'
- }
+ {'torznab': 'http://torznab.com/schemas/2015/feed'},
)
if element is not None:
diff --git a/searx/engines/translated.py b/searx/engines/translated.py
index 8d67ca0bb..62ade49e2 100644
--- a/searx/engines/translated.py
+++ b/searx/engines/translated.py
@@ -28,24 +28,25 @@ def request(query, params):
key_form = '&key=' + api_key
else:
key_form = ''
- params['url'] = url.format(from_lang=params['from_lang'][1],
- to_lang=params['to_lang'][1],
- query=params['query'],
- key=key_form)
+ params['url'] = url.format(
+ from_lang=params['from_lang'][1], to_lang=params['to_lang'][1], query=params['query'], key=key_form
+ )
return params
def response(resp):
results = []
- results.append({
- 'url': web_url.format(
- from_lang=resp.search_params['from_lang'][2],
- to_lang=resp.search_params['to_lang'][2],
- query=resp.search_params['query']),
- 'title': '[{0}-{1}] {2}'.format(
- resp.search_params['from_lang'][1],
- resp.search_params['to_lang'][1],
- resp.search_params['query']),
- 'content': resp.json()['responseData']['translatedText']
- })
+ results.append(
+ {
+ 'url': web_url.format(
+ from_lang=resp.search_params['from_lang'][2],
+ to_lang=resp.search_params['to_lang'][2],
+ query=resp.search_params['query'],
+ ),
+ 'title': '[{0}-{1}] {2}'.format(
+ resp.search_params['from_lang'][1], resp.search_params['to_lang'][1], resp.search_params['query']
+ ),
+ 'content': resp.json()['responseData']['translatedText'],
+ }
+ )
return results
diff --git a/searx/engines/unsplash.py b/searx/engines/unsplash.py
index 1445b4cec..1967fefd2 100644
--- a/searx/engines/unsplash.py
+++ b/searx/engines/unsplash.py
@@ -26,23 +26,13 @@ paging = True
def clean_url(url):
parsed = urlparse(url)
- query = [(k, v) for (k, v)
- in parse_qsl(parsed.query) if k not in ['ixid', 's']]
+ query = [(k, v) for (k, v) in parse_qsl(parsed.query) if k not in ['ixid', 's']]
- return urlunparse((
- parsed.scheme,
- parsed.netloc,
- parsed.path,
- parsed.params,
- urlencode(query),
- parsed.fragment
- ))
+ return urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params, urlencode(query), parsed.fragment))
def request(query, params):
- params['url'] = search_url + urlencode({
- 'query': query, 'page': params['pageno'], 'per_page': page_size
- })
+ params['url'] = search_url + urlencode({'query': query, 'page': params['pageno'], 'per_page': page_size})
logger.debug("query_url --> %s", params['url'])
return params
@@ -53,13 +43,15 @@ def response(resp):
if 'results' in json_data:
for result in json_data['results']:
- results.append({
- 'template': 'images.html',
- 'url': clean_url(result['links']['html']),
- 'thumbnail_src': clean_url(result['urls']['thumb']),
- 'img_src': clean_url(result['urls']['raw']),
- 'title': result.get('alt_description') or 'unknown',
- 'content': result.get('description') or ''
- })
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': clean_url(result['links']['html']),
+ 'thumbnail_src': clean_url(result['urls']['thumb']),
+ 'img_src': clean_url(result['urls']['raw']),
+ 'title': result.get('alt_description') or 'unknown',
+ 'content': result.get('description') or '',
+ }
+ )
return results
diff --git a/searx/engines/vimeo.py b/searx/engines/vimeo.py
index 824579256..52d201eac 100644
--- a/searx/engines/vimeo.py
+++ b/searx/engines/vimeo.py
@@ -25,15 +25,16 @@ paging = True
base_url = 'https://vimeo.com/'
search_url = base_url + '/search/page:{pageno}?{query}'
-embedded_url = '<iframe data-src="https://player.vimeo.com/video/{videoid}" ' +\
- 'width="540" height="304" frameborder="0" ' +\
- 'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
+embedded_url = (
+ '<iframe data-src="https://player.vimeo.com/video/{videoid}" '
+ + 'width="540" height="304" frameborder="0" '
+ + 'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
+)
# do search-request
def request(query, params):
- params['url'] = search_url.format(pageno=params['pageno'],
- query=urlencode({'q': query}))
+ params['url'] = search_url.format(pageno=params['pageno'], query=urlencode({'q': query}))
return params
@@ -56,13 +57,17 @@ def response(resp):
embedded = embedded_url.format(videoid=videoid)
# append result
- results.append({'url': url,
- 'title': title,
- 'content': '',
- 'template': 'videos.html',
- 'publishedDate': publishedDate,
- 'embedded': embedded,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': '',
+ 'template': 'videos.html',
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
+ 'thumbnail': thumbnail,
+ }
+ )
# return results
return results
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 59413499c..c8881d299 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -14,7 +14,10 @@ from searx.data import WIKIDATA_UNITS
from searx.network import post, get
from searx.utils import match_language, searx_useragent, get_string_replaces_function
from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
-from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
+from searx.engines.wikipedia import (
+ _fetch_supported_languages,
+ supported_languages_url,
+) # NOQA # pylint: disable=unused-import
# about
about = {
@@ -112,10 +115,7 @@ replace_http_by_https = get_string_replaces_function({'http:': 'https:'})
def get_headers():
# user agent: https://www.mediawiki.org/wiki/Wikidata_Query_Service/User_Manual#Query_limits
- return {
- 'Accept': 'application/sparql-results+json',
- 'User-Agent': searx_useragent()
- }
+ return {'Accept': 'application/sparql-results+json', 'User-Agent': searx_useragent()}
def get_label_for_entity(entity_id, language):
@@ -211,9 +211,9 @@ def get_results(attribute_result, attributes, language):
results.append({'title': infobox_title, 'url': url})
# update the infobox_id with the wikipedia URL
# first the local wikipedia URL, and as fallback the english wikipedia URL
- if attribute_type == WDArticle\
- and ((attribute.language == 'en' and infobox_id_lang is None)
- or attribute.language != 'en'):
+ if attribute_type == WDArticle and (
+ (attribute.language == 'en' and infobox_id_lang is None) or attribute.language != 'en'
+ ):
infobox_id_lang = attribute.language
infobox_id = url
elif attribute_type == WDImageAttribute:
@@ -232,13 +232,11 @@ def get_results(attribute_result, attributes, language):
osm_zoom = area_to_osm_zoom(area) if area else 19
url = attribute.get_geo_url(attribute_result, osm_zoom=osm_zoom)
if url:
- infobox_urls.append({'title': attribute.get_label(language),
- 'url': url,
- 'entity': attribute.name})
+ infobox_urls.append({'title': attribute.get_label(language), 'url': url, 'entity': attribute.name})
else:
- infobox_attributes.append({'label': attribute.get_label(language),
- 'value': value,
- 'entity': attribute.name})
+ infobox_attributes.append(
+ {'label': attribute.get_label(language), 'value': value, 'entity': attribute.name}
+ )
if infobox_id:
infobox_id = replace_http_by_https(infobox_id)
@@ -246,22 +244,19 @@ def get_results(attribute_result, attributes, language):
# add the wikidata URL at the end
infobox_urls.append({'title': 'Wikidata', 'url': attribute_result['item']})
- if img_src is None and len(infobox_attributes) == 0 and len(infobox_urls) == 1 and\
- len(infobox_content) == 0:
- results.append({
- 'url': infobox_urls[0]['url'],
- 'title': infobox_title,
- 'content': infobox_content
- })
+ if img_src is None and len(infobox_attributes) == 0 and len(infobox_urls) == 1 and len(infobox_content) == 0:
+ results.append({'url': infobox_urls[0]['url'], 'title': infobox_title, 'content': infobox_content})
else:
- results.append({
- 'infobox': infobox_title,
- 'id': infobox_id,
- 'content': infobox_content,
- 'img_src': img_src,
- 'urls': infobox_urls,
- 'attributes': infobox_attributes
- })
+ results.append(
+ {
+ 'infobox': infobox_title,
+ 'id': infobox_id,
+ 'content': infobox_content,
+ 'img_src': img_src,
+ 'urls': infobox_urls,
+ 'attributes': infobox_attributes,
+ }
+ )
return results
@@ -271,13 +266,14 @@ def get_query(query, language):
where = list(filter(lambda s: len(s) > 0, [a.get_where() for a in attributes]))
wikibase_label = list(filter(lambda s: len(s) > 0, [a.get_wikibase_label() for a in attributes]))
group_by = list(filter(lambda s: len(s) > 0, [a.get_group_by() for a in attributes]))
- query = QUERY_TEMPLATE\
- .replace('%QUERY%', sparql_string_escape(query))\
- .replace('%SELECT%', ' '.join(select))\
- .replace('%WHERE%', '\n '.join(where))\
- .replace('%WIKIBASE_LABELS%', '\n '.join(wikibase_label))\
- .replace('%GROUP_BY%', ' '.join(group_by))\
+ query = (
+ QUERY_TEMPLATE.replace('%QUERY%', sparql_string_escape(query))
+ .replace('%SELECT%', ' '.join(select))
+ .replace('%WHERE%', '\n '.join(where))
+ .replace('%WIKIBASE_LABELS%', '\n '.join(wikibase_label))
+ .replace('%GROUP_BY%', ' '.join(group_by))
.replace('%LANGUAGE%', language)
+ )
return query, attributes
@@ -303,90 +299,98 @@ def get_attributes(language):
attributes.append(WDDateAttribute(name))
# Dates
- for p in ['P571', # inception date
- 'P576', # dissolution date
- 'P580', # start date
- 'P582', # end date
- 'P569', # date of birth
- 'P570', # date of death
- 'P619', # date of spacecraft launch
- 'P620']: # date of spacecraft landing
+ for p in [
+ 'P571', # inception date
+ 'P576', # dissolution date
+ 'P580', # start date
+ 'P582', # end date
+ 'P569', # date of birth
+ 'P570', # date of death
+ 'P619', # date of spacecraft launch
+ 'P620',
+ ]: # date of spacecraft landing
add_date(p)
- for p in ['P27', # country of citizenship
- 'P495', # country of origin
- 'P17', # country
- 'P159']: # headquarters location
+ for p in [
+ 'P27', # country of citizenship
+ 'P495', # country of origin
+ 'P17', # country
+ 'P159',
+ ]: # headquarters location
add_label(p)
# Places
- for p in ['P36', # capital
- 'P35', # head of state
- 'P6', # head of government
- 'P122', # basic form of government
- 'P37']: # official language
+ for p in [
+ 'P36', # capital
+ 'P35', # head of state
+ 'P6', # head of government
+ 'P122', # basic form of government
+ 'P37',
+ ]: # official language
add_label(p)
- add_value('P1082') # population
+ add_value('P1082') # population
add_amount('P2046') # area
- add_amount('P281') # postal code
- add_label('P38') # currency
+ add_amount('P281') # postal code
+ add_label('P38') # currency
add_amount('P2048') # height (building)
# Media
- for p in ['P400', # platform (videogames, computing)
- 'P50', # author
- 'P170', # creator
- 'P57', # director
- 'P175', # performer
- 'P178', # developer
- 'P162', # producer
- 'P176', # manufacturer
- 'P58', # screenwriter
- 'P272', # production company
- 'P264', # record label
- 'P123', # publisher
- 'P449', # original network
- 'P750', # distributed by
- 'P86']: # composer
+ for p in [
+ 'P400', # platform (videogames, computing)
+ 'P50', # author
+ 'P170', # creator
+ 'P57', # director
+ 'P175', # performer
+ 'P178', # developer
+ 'P162', # producer
+ 'P176', # manufacturer
+ 'P58', # screenwriter
+ 'P272', # production company
+ 'P264', # record label
+ 'P123', # publisher
+ 'P449', # original network
+ 'P750', # distributed by
+ 'P86',
+ ]: # composer
add_label(p)
- add_date('P577') # publication date
- add_label('P136') # genre (music, film, artistic...)
- add_label('P364') # original language
- add_value('P212') # ISBN-13
- add_value('P957') # ISBN-10
- add_label('P275') # copyright license
- add_label('P277') # programming language
- add_value('P348') # version
- add_label('P840') # narrative location
+ add_date('P577') # publication date
+ add_label('P136') # genre (music, film, artistic...)
+ add_label('P364') # original language
+ add_value('P212') # ISBN-13
+ add_value('P957') # ISBN-10
+ add_label('P275') # copyright license
+ add_label('P277') # programming language
+ add_value('P348') # version
+ add_label('P840') # narrative location
# Languages
- add_value('P1098') # number of speakers
- add_label('P282') # writing system
- add_label('P1018') # language regulatory body
- add_value('P218') # language code (ISO 639-1)
+ add_value('P1098') # number of speakers
+ add_label('P282') # writing system
+ add_label('P1018') # language regulatory body
+ add_value('P218') # language code (ISO 639-1)
# Other
- add_label('P169') # ceo
- add_label('P112') # founded by
- add_label('P1454') # legal form (company, organization)
- add_label('P137') # operator (service, facility, ...)
- add_label('P1029') # crew members (tripulation)
- add_label('P225') # taxon name
- add_value('P274') # chemical formula
- add_label('P1346') # winner (sports, contests, ...)
- add_value('P1120') # number of deaths
- add_value('P498') # currency code (ISO 4217)
+ add_label('P169') # ceo
+ add_label('P112') # founded by
+ add_label('P1454') # legal form (company, organization)
+ add_label('P137') # operator (service, facility, ...)
+ add_label('P1029') # crew members (tripulation)
+ add_label('P225') # taxon name
+ add_value('P274') # chemical formula
+ add_label('P1346') # winner (sports, contests, ...)
+ add_value('P1120') # number of deaths
+ add_value('P498') # currency code (ISO 4217)
# URL
- add_url('P856', official=True) # official website
+ add_url('P856', official=True) # official website
attributes.append(WDArticle(language)) # wikipedia (user language)
if not language.startswith('en'):
attributes.append(WDArticle('en')) # wikipedia (english)
- add_url('P1324') # source code repository
- add_url('P1581') # blog
+ add_url('P1324') # source code repository
+ add_url('P1581') # blog
add_url('P434', url_id='musicbrainz_artist')
add_url('P435', url_id='musicbrainz_work')
add_url('P436', url_id='musicbrainz_release_group')
@@ -402,11 +406,11 @@ def get_attributes(language):
attributes.append(WDGeoAttribute('P625'))
# Image
- add_image('P15', priority=1, url_id='wikimedia_image') # route map
- add_image('P242', priority=2, url_id='wikimedia_image') # locator map
- add_image('P154', priority=3, url_id='wikimedia_image') # logo
- add_image('P18', priority=4, url_id='wikimedia_image') # image
- add_image('P41', priority=5, url_id='wikimedia_image') # flag
+ add_image('P15', priority=1, url_id='wikimedia_image') # route map
+ add_image('P242', priority=2, url_id='wikimedia_image') # locator map
+ add_image('P154', priority=3, url_id='wikimedia_image') # logo
+ add_image('P18', priority=4, url_id='wikimedia_image') # image
+ add_image('P41', priority=5, url_id='wikimedia_image') # flag
add_image('P2716', priority=6, url_id='wikimedia_image') # collage
add_image('P2910', priority=7, url_id='wikimedia_image') # icon
@@ -415,7 +419,7 @@ def get_attributes(language):
class WDAttribute:
- __slots__ = 'name',
+ __slots__ = ('name',)
def __init__(self, name):
self.name = name
@@ -443,14 +447,15 @@ class WDAttribute:
class WDAmountAttribute(WDAttribute):
-
def get_select(self):
return '?{name} ?{name}Unit'.replace('{name}', self.name)
def get_where(self):
return """ OPTIONAL { ?item p:{name} ?{name}Node .
?{name}Node rdf:type wikibase:BestRank ; ps:{name} ?{name} .
- OPTIONAL { ?{name}Node psv:{name}/wikibase:quantityUnit ?{name}Unit. } }""".replace('{name}', self.name)
+ OPTIONAL { ?{name}Node psv:{name}/wikibase:quantityUnit ?{name}Unit. } }""".replace(
+ '{name}', self.name
+ )
def get_group_by(self):
return self.get_select()
@@ -484,7 +489,9 @@ class WDArticle(WDAttribute):
return """OPTIONAL { ?article{language} schema:about ?item ;
schema:inLanguage "{language}" ;
schema:isPartOf <https://{language}.wikipedia.org/> ;
- schema:name ?articleName{language} . }""".replace('{language}', self.language)
+ schema:name ?articleName{language} . }""".replace(
+ '{language}', self.language
+ )
def get_group_by(self):
return self.get_select()
@@ -495,7 +502,6 @@ class WDArticle(WDAttribute):
class WDLabelAttribute(WDAttribute):
-
def get_select(self):
return '(group_concat(distinct ?{name}Label;separator=", ") as ?{name}Labels)'.replace('{name}', self.name)
@@ -526,14 +532,13 @@ class WDURLAttribute(WDAttribute):
value = value.split(',')[0]
url_id = self.url_id
if value.startswith(WDURLAttribute.HTTP_WIKIMEDIA_IMAGE):
- value = value[len(WDURLAttribute.HTTP_WIKIMEDIA_IMAGE):]
+ value = value[len(WDURLAttribute.HTTP_WIKIMEDIA_IMAGE) :]
url_id = 'wikimedia_image'
return get_external_url(url_id, value)
return value
class WDGeoAttribute(WDAttribute):
-
def get_label(self, language):
return "OpenStreetMap"
@@ -543,7 +548,9 @@ class WDGeoAttribute(WDAttribute):
def get_where(self):
return """OPTIONAL { ?item p:{name}/psv:{name} [
wikibase:geoLatitude ?{name}Lat ;
- wikibase:geoLongitude ?{name}Long ] }""".replace('{name}', self.name)
+ wikibase:geoLongitude ?{name}Long ] }""".replace(
+ '{name}', self.name
+ )
def get_group_by(self):
return self.get_select()
@@ -565,7 +572,7 @@ class WDGeoAttribute(WDAttribute):
class WDImageAttribute(WDURLAttribute):
- __slots__ = 'priority',
+ __slots__ = ('priority',)
def __init__(self, name, url_id=None, priority=100):
super().__init__(name, url_id)
@@ -573,7 +580,6 @@ class WDImageAttribute(WDURLAttribute):
class WDDateAttribute(WDAttribute):
-
def get_select(self):
return '?{name} ?{name}timePrecision ?{name}timeZone ?{name}timeCalendar'.replace('{name}', self.name)
@@ -587,7 +593,9 @@ class WDDateAttribute(WDAttribute):
wikibase:timePrecision ?{name}timePrecision ;
wikibase:timeTimezone ?{name}timeZone ;
wikibase:timeCalendarModel ?{name}timeCalendar ] . }
- hint:Prior hint:rangeSafe true;""".replace('{name}', self.name)
+ hint:Prior hint:rangeSafe true;""".replace(
+ '{name}', self.name
+ )
def get_group_by(self):
return self.get_select()
@@ -619,11 +627,12 @@ class WDDateAttribute(WDAttribute):
def format_13(self, value, locale):
timestamp = isoparse(value)
# precision: minute
- return get_datetime_format(format, locale=locale) \
- .replace("'", "") \
- .replace('{0}', format_time(timestamp, 'full', tzinfo=None,
- locale=locale)) \
+ return (
+ get_datetime_format(format, locale=locale)
+ .replace("'", "")
+ .replace('{0}', format_time(timestamp, 'full', tzinfo=None, locale=locale))
.replace('{1}', format_date(timestamp, 'short', locale=locale))
+ )
def format_14(self, value, locale):
# precision: second.
@@ -644,7 +653,7 @@ class WDDateAttribute(WDAttribute):
'11': ('format_11', 0), # day
'12': ('format_13', 0), # hour (not supported by babel, display minute)
'13': ('format_13', 0), # minute
- '14': ('format_14', 0) # second
+ '14': ('format_14', 0), # second
}
def get_str(self, result, language):
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index 5e34db9a7..cc806a8de 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -39,8 +39,7 @@ def request(query, params):
query = query.title()
language = url_lang(params['language'])
- params['url'] = search_url.format(title=quote(query),
- language=language)
+ params['url'] = search_url.format(title=quote(query), language=language)
if params['language'].lower() in language_variants.get(language, []):
params['headers']['Accept-Language'] = params['language'].lower()
@@ -63,8 +62,10 @@ def response(resp):
except:
pass
else:
- if api_result['type'] == 'https://mediawiki.org/wiki/HyperSwitch/errors/bad_request' \
- and api_result['detail'] == 'title-invalid-characters':
+ if (
+ api_result['type'] == 'https://mediawiki.org/wiki/HyperSwitch/errors/bad_request'
+ and api_result['detail'] == 'title-invalid-characters'
+ ):
return []
raise_for_httperror(resp)
@@ -81,11 +82,15 @@ def response(resp):
results.append({'url': wikipedia_link, 'title': title})
- results.append({'infobox': title,
- 'id': wikipedia_link,
- 'content': api_result.get('extract', ''),
- 'img_src': api_result.get('thumbnail', {}).get('source'),
- 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})
+ results.append(
+ {
+ 'infobox': title,
+ 'id': wikipedia_link,
+ 'content': api_result.get('extract', ''),
+ 'img_src': api_result.get('thumbnail', {}).get('source'),
+ 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}],
+ }
+ )
return results
diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py
index 9c84e2809..1c882c582 100644
--- a/searx/engines/wolframalpha_api.py
+++ b/searx/engines/wolframalpha_api.py
@@ -36,8 +36,7 @@ img_alt_xpath = './@alt'
# pods to display as image in infobox
# these pods return plaintext, but they look better and are more useful as images
-image_pods = {'VisualRepresentation',
- 'Illustration'}
+image_pods = {'VisualRepresentation', 'Illustration'}
# do search-request
@@ -50,15 +49,17 @@ def request(query, params):
# replace private user area characters to make text legible
def replace_pua_chars(text):
- pua_chars = {'\uf522': '\u2192', # rigth arrow
- '\uf7b1': '\u2115', # set of natural numbers
- '\uf7b4': '\u211a', # set of rational numbers
- '\uf7b5': '\u211d', # set of real numbers
- '\uf7bd': '\u2124', # set of integer numbers
- '\uf74c': 'd', # differential
- '\uf74d': '\u212f', # euler's number
- '\uf74e': 'i', # imaginary number
- '\uf7d9': '='} # equals sign
+ pua_chars = {
+ '\uf522': '\u2192', # right arrow
+ '\uf7b1': '\u2115', # set of natural numbers
+ '\uf7b4': '\u211a', # set of rational numbers
+ '\uf7b5': '\u211d', # set of real numbers
+ '\uf7bd': '\u2124', # set of integer numbers
+ '\uf74c': 'd', # differential
+ '\uf74d': '\u212f', # euler's number
+ '\uf74e': 'i', # imaginary number
+ '\uf7d9': '=', # equals sign
+ }
for k, v in pua_chars.items():
text = text.replace(k, v)
@@ -112,9 +113,12 @@ def response(resp):
result_chunks.append({'label': pod_title, 'value': content})
elif image:
- result_chunks.append({'label': pod_title,
- 'image': {'src': image[0].xpath(img_src_xpath)[0],
- 'alt': image[0].xpath(img_alt_xpath)[0]}})
+ result_chunks.append(
+ {
+ 'label': pod_title,
+ 'image': {'src': image[0].xpath(img_src_xpath)[0], 'alt': image[0].xpath(img_alt_xpath)[0]},
+ }
+ )
if not result_chunks:
return []
@@ -122,13 +126,15 @@ def response(resp):
title = "Wolfram|Alpha (%s)" % infobox_title
# append infobox
- results.append({'infobox': infobox_title,
- 'attributes': result_chunks,
- 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})
+ results.append(
+ {
+ 'infobox': infobox_title,
+ 'attributes': result_chunks,
+ 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}],
+ }
+ )
# append link to site
- results.append({'url': resp.request.headers['Referer'],
- 'title': title,
- 'content': result_content})
+ results.append({'url': resp.request.headers['Referer'], 'title': title, 'content': result_content})
return results
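For context on the pua_chars hunk: Wolfram|Alpha's plaintext pods use Private Use Area codepoints for mathematical symbols, and replace_pua_chars swaps them for readable Unicode. A self-contained sketch of the same idea, using a subset of the mappings shown in the diff:
# Standalone sketch of the replace_pua_chars technique from wolframalpha_api.py.
PUA_CHARS = {
    '\uf522': '\u2192',  # right arrow
    '\uf7b1': '\u2115',  # set of natural numbers
    '\uf7d9': '=',       # equals sign
}

def replace_pua_chars(text):
    for pua, readable in PUA_CHARS.items():
        text = text.replace(pua, readable)
    return text

# replace_pua_chars('x \uf7d9 2 \uf522 done')  ->  'x = 2 → done'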
diff --git a/searx/engines/wolframalpha_noapi.py b/searx/engines/wolframalpha_noapi.py
index 1f2cfa4e6..bad25602a 100644
--- a/searx/engines/wolframalpha_noapi.py
+++ b/searx/engines/wolframalpha_noapi.py
@@ -22,30 +22,29 @@ about = {
# search-url
url = 'https://www.wolframalpha.com/'
-search_url = url + 'input/json.jsp'\
- '?async=false'\
- '&banners=raw'\
- '&debuggingdata=false'\
- '&format=image,plaintext,imagemap,minput,moutput'\
- '&formattimeout=2'\
- '&{query}'\
- '&output=JSON'\
- '&parsetimeout=2'\
- '&proxycode={token}'\
- '&scantimeout=0.5'\
- '&sponsorcategories=true'\
+search_url = (
+ url + 'input/json.jsp'
+ '?async=false'
+ '&banners=raw'
+ '&debuggingdata=false'
+ '&format=image,plaintext,imagemap,minput,moutput'
+ '&formattimeout=2'
+ '&{query}'
+ '&output=JSON'
+ '&parsetimeout=2'
+ '&proxycode={token}'
+ '&scantimeout=0.5'
+ '&sponsorcategories=true'
'&statemethod=deploybutton'
+)
referer_url = url + 'input/?{query}'
-token = {'value': '',
- 'last_updated': None}
+token = {'value': '', 'last_updated': None}
# pods to display as image in infobox
# these pods do return plaintext, but they look better and are more useful as images
-image_pods = {'VisualRepresentation',
- 'Illustration',
- 'Symbol'}
+image_pods = {'VisualRepresentation', 'Illustration', 'Symbol'}
# wolframalpha seems to reset its token every hour
@@ -115,12 +114,20 @@ def response(resp):
if not result_chunks:
return []
- results.append({'infobox': infobox_title,
- 'attributes': result_chunks,
- 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})
-
- results.append({'url': resp.request.headers['Referer'],
- 'title': 'Wolfram|Alpha (' + infobox_title + ')',
- 'content': result_content})
+ results.append(
+ {
+ 'infobox': infobox_title,
+ 'attributes': result_chunks,
+ 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}],
+ }
+ )
+
+ results.append(
+ {
+ 'url': resp.request.headers['Referer'],
+ 'title': 'Wolfram|Alpha (' + infobox_title + ')',
+ 'content': result_content,
+ }
+ )
return results
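The token dict and the "reset every hour" comment above suggest an hourly token cache. A hedged sketch of that pattern, where fetch_token() is a placeholder for the scraping step that is not part of this hunk:
# Assumed refresh logic only; fetch_token() is hypothetical, the real engine
# obtains the proxy token elsewhere in wolframalpha_noapi.py.
from datetime import datetime, timedelta

token = {'value': '', 'last_updated': None}

def get_token(fetch_token):
    now = datetime.now()
    if token['last_updated'] is None or now - token['last_updated'] > timedelta(hours=1):
        token['value'] = fetch_token()
        token['last_updated'] = now
    return token['value']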
diff --git a/searx/engines/wordnik.py b/searx/engines/wordnik.py
index 0c3785cfb..21eaeccc3 100644
--- a/searx/engines/wordnik.py
+++ b/searx/engines/wordnik.py
@@ -48,7 +48,7 @@ def response(resp):
def_abbr = extract_text(def_item.xpath('.//abbr')).strip()
def_text = extract_text(def_item).strip()
if def_abbr:
- def_text = def_text[len(def_abbr):].strip()
+ def_text = def_text[len(def_abbr) :].strip()
src_defs.append((def_abbr, def_text))
definitions.append((src_text, src_defs))
@@ -66,9 +66,11 @@ def response(resp):
infobox += f"<li><i>{def_abbr}</i> {def_text}</li>"
infobox += "</ul>"
- results.append({
- 'infobox': word,
- 'content': infobox,
- })
+ results.append(
+ {
+ 'infobox': word,
+ 'content': infobox,
+ }
+ )
return results
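The wordnik hunk slices off len(def_abbr) characters because the extracted definition text begins with the abbreviation itself. A toy illustration of that prefix strip (the guard on startswith is an extra safety check, not in the engine):
def strip_abbr(def_abbr, def_text):
    # drop the leading abbreviation (e.g. "n.") from the definition text
    if def_abbr and def_text.startswith(def_abbr):
        def_text = def_text[len(def_abbr):].strip()
    return def_text

# strip_abbr('n.', 'n. A single distinct meaning of a word')
#   ->  'A single distinct meaning of a word'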
diff --git a/searx/engines/www1x.py b/searx/engines/www1x.py
index 96b8d680c..f6b82944d 100644
--- a/searx/engines/www1x.py
+++ b/searx/engines/www1x.py
@@ -46,12 +46,16 @@ def response(resp):
thumbnail_src = urljoin(gallery_url, eval_xpath_getindex(link, './/img', 0).attrib['src'])
# append result
- results.append({'url': url,
- 'title': title,
- 'img_src': thumbnail_src,
- 'content': '',
- 'thumbnail_src': thumbnail_src,
- 'template': 'images.html'})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'img_src': thumbnail_src,
+ 'content': '',
+ 'thumbnail_src': thumbnail_src,
+ 'template': 'images.html',
+ }
+ )
# return results
return results
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 08677b708..2737bf94a 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -56,7 +56,7 @@ Replacements are:
"""
-lang_all='en'
+lang_all = 'en'
'''Replacement ``{lang}`` in :py:obj:`search_url` if language ``all`` is
selected.
'''
@@ -110,9 +110,9 @@ requested by the user, the URL parameter is an empty string. The
time_range_map = {
'day': 24,
- 'week': 24*7,
- 'month': 24*30,
- 'year': 24*365,
+ 'week': 24 * 7,
+ 'month': 24 * 30,
+ 'year': 24 * 365,
}
'''Maps time range value from user to ``{time_range_val}`` in
:py:obj:`time_range_url`.
@@ -129,11 +129,7 @@ time_range_map = {
safe_search_support = False
'''Engine supports safe-search.'''
-safe_search_map = {
- 0: '&filter=none',
- 1: '&filter=moderate',
- 2: '&filter=strict'
-}
+safe_search_map = {0: '&filter=none', 1: '&filter=moderate', 2: '&filter=strict'}
'''Maps safe-search value to ``{safe_search}`` in :py:obj:`search_url`.
.. code:: yaml
@@ -146,10 +142,9 @@ safe_search_map = {
'''
-def request(query, params):
- '''Build request parameters (see :ref:`engine request`).
- '''
+def request(query, params):
+ '''Build request parameters (see :ref:`engine request`).'''
lang = lang_all
if params['language'] != 'all':
lang = params['language'][:2]
@@ -167,8 +162,8 @@ def request(query, params):
'query': urlencode({'q': query})[2:],
'lang': lang,
'pageno': (params['pageno'] - 1) * page_size + first_page_num,
- 'time_range' : time_range,
- 'safe_search' : safe_search,
+ 'time_range': time_range,
+ 'safe_search': safe_search,
}
params['url'] = search_url.format(**fargs)
@@ -176,10 +171,9 @@ def request(query, params):
return params
-def response(resp):
- '''Scrap *results* from the response (see :ref:`engine results`).
- '''
+def response(resp):
+ '''Scrape *results* from the response (see :ref:`engine results`).'''
results = []
dom = html.fromstring(resp.text)
is_onion = 'onions' in categories
@@ -200,10 +194,7 @@ def response(resp):
# add alternative cached url if available
if cached_xpath:
- tmp_result['cached_url'] = (
- cached_url
- + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
- )
+ tmp_result['cached_url'] = cached_url + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
if is_onion:
tmp_result['is_onion'] = True
@@ -213,31 +204,27 @@ def response(resp):
else:
if cached_xpath:
for url, title, content, cached in zip(
- (extract_url(x, search_url) for
- x in eval_xpath_list(dom, url_xpath)),
+ (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
map(extract_text, eval_xpath_list(dom, title_xpath)),
map(extract_text, eval_xpath_list(dom, content_xpath)),
- map(extract_text, eval_xpath_list(dom, cached_xpath))
+ map(extract_text, eval_xpath_list(dom, cached_xpath)),
):
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'cached_url': cached_url + cached, 'is_onion': is_onion
- })
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'cached_url': cached_url + cached,
+ 'is_onion': is_onion,
+ }
+ )
else:
for url, title, content in zip(
- (extract_url(x, search_url) for
- x in eval_xpath_list(dom, url_xpath)),
+ (extract_url(x, search_url) for x in eval_xpath_list(dom, url_xpath)),
map(extract_text, eval_xpath_list(dom, title_xpath)),
- map(extract_text, eval_xpath_list(dom, content_xpath))
+ map(extract_text, eval_xpath_list(dom, content_xpath)),
):
- results.append({
- 'url': url,
- 'title': title,
- 'content': content,
- 'is_onion': is_onion
- })
+ results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
if suggestion_xpath:
for suggestion in eval_xpath(dom, suggestion_xpath):
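The xpath.py docstrings above explain that time_range_map and safe_search_map feed the {time_range_val} and {safe_search} placeholders of the configured URL templates. A small sketch of that substitution, with example template values rather than a real engine configuration:
# Illustrative only: search_url and time_range_url are made-up settings values.
from urllib.parse import urlencode

search_url = 'https://example.org/search?q={query}&p={pageno}{time_range}{safe_search}'
time_range_url = '&hours={time_range_val}'
time_range_map = {'day': 24, 'week': 24 * 7, 'month': 24 * 30, 'year': 24 * 365}
safe_search_map = {0: '&filter=none', 1: '&filter=moderate', 2: '&filter=strict'}

def build_url(query, pageno=1, time_range='week', safe_search=1):
    tr = time_range_url.format(time_range_val=time_range_map[time_range]) if time_range else ''
    return search_url.format(
        query=urlencode({'q': query})[2:],  # engine convention: strip the leading "q="
        pageno=pageno,
        time_range=tr,
        safe_search=safe_search_map.get(safe_search, ''),
    )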
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index fbd99c47b..12e7305db 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -30,18 +30,16 @@ http_digest_auth_pass = ""
# search-url
base_url = 'http://localhost:8090'
-search_url = '/yacysearch.json?{query}'\
- '&startRecord={offset}'\
- '&maximumRecords={limit}'\
- '&contentdom={search_type}'\
- '&resource=global'
+search_url = (
+ '/yacysearch.json?{query}'
+ '&startRecord={offset}'
+ '&maximumRecords={limit}'
+ '&contentdom={search_type}'
+ '&resource=global'
+)
# yacy specific type-definitions
-search_types = {'general': 'text',
- 'images': 'image',
- 'files': 'app',
- 'music': 'audio',
- 'videos': 'video'}
+search_types = {'general': 'text', 'images': 'image', 'files': 'app', 'music': 'audio', 'videos': 'video'}
# do search-request
@@ -49,11 +47,9 @@ def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
search_type = search_types.get(params.get('category'), '0')
- params['url'] = base_url +\
- search_url.format(query=urlencode({'query': query}),
- offset=offset,
- limit=number_of_results,
- search_type=search_type)
+ params['url'] = base_url + search_url.format(
+ query=urlencode({'query': query}), offset=offset, limit=number_of_results, search_type=search_type
+ )
if http_digest_auth_user and http_digest_auth_pass:
params['auth'] = DigestAuth(http_digest_auth_user, http_digest_auth_pass)
@@ -93,21 +89,29 @@ def response(resp):
continue
# append result
- results.append({'url': result_url,
- 'title': result['title'],
- 'content': '',
- 'img_src': result['image'],
- 'template': 'images.html'})
+ results.append(
+ {
+ 'url': result_url,
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['image'],
+ 'template': 'images.html',
+ }
+ )
# parse general results
else:
publishedDate = parser.parse(result['pubDate'])
# append result
- results.append({'url': result['link'],
- 'title': result['title'],
- 'content': html_to_text(result['description']),
- 'publishedDate': publishedDate})
+ results.append(
+ {
+ 'url': result['link'],
+ 'title': result['title'],
+ 'content': html_to_text(result['description']),
+ 'publishedDate': publishedDate,
+ }
+ )
# TODO parse video, audio and file results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index bd6e6721c..08bde6665 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -50,59 +50,59 @@ language_aliases = {
}
lang2domain = {
- 'zh_chs' : 'hk.search.yahoo.com',
- 'zh_cht' : 'tw.search.yahoo.com',
- 'en' : 'search.yahoo.com',
-
- 'bg' : 'search.yahoo.com',
- 'cs' : 'search.yahoo.com',
- 'da' : 'search.yahoo.com',
- 'el' : 'search.yahoo.com',
- 'et' : 'search.yahoo.com',
- 'he' : 'search.yahoo.com',
- 'hr' : 'search.yahoo.com',
- 'ja' : 'search.yahoo.com',
- 'ko' : 'search.yahoo.com',
- 'sk' : 'search.yahoo.com',
- 'sl' : 'search.yahoo.com',
-
+ 'zh_chs': 'hk.search.yahoo.com',
+ 'zh_cht': 'tw.search.yahoo.com',
+ 'en': 'search.yahoo.com',
+ 'bg': 'search.yahoo.com',
+ 'cs': 'search.yahoo.com',
+ 'da': 'search.yahoo.com',
+ 'el': 'search.yahoo.com',
+ 'et': 'search.yahoo.com',
+ 'he': 'search.yahoo.com',
+ 'hr': 'search.yahoo.com',
+ 'ja': 'search.yahoo.com',
+ 'ko': 'search.yahoo.com',
+ 'sk': 'search.yahoo.com',
+ 'sl': 'search.yahoo.com',
}
"""Map language to domain"""
+
def _get_language(params):
lang = language_aliases.get(params['language'])
if lang is None:
- lang = match_language(
- params['language'], supported_languages, language_aliases
- )
+ lang = match_language(params['language'], supported_languages, language_aliases)
lang = lang.split('-')[0]
- logger.debug("params['language']: %s --> %s" , params['language'], lang)
+ logger.debug("params['language']: %s --> %s", params['language'], lang)
return lang
+
def request(query, params):
"""build request"""
offset = (params['pageno'] - 1) * 7 + 1
- lang = _get_language(params)
- age, btf = time_range_dict.get(
- params['time_range'], ('', ''))
-
- args = urlencode({
- 'p' : query,
- 'ei' : 'UTF-8',
- 'fl' : 1,
- 'vl' : 'lang_' + lang,
- 'btf' : btf,
- 'fr2' : 'time',
- 'age' : age,
- 'b' : offset,
- 'xargs' :0
- })
+ lang = _get_language(params)
+ age, btf = time_range_dict.get(params['time_range'], ('', ''))
+
+ args = urlencode(
+ {
+ 'p': query,
+ 'ei': 'UTF-8',
+ 'fl': 1,
+ 'vl': 'lang_' + lang,
+ 'btf': btf,
+ 'fr2': 'time',
+ 'age': age,
+ 'b': offset,
+ 'xargs': 0,
+ }
+ )
domain = lang2domain.get(lang, '%s.search.yahoo.com' % lang)
params['url'] = 'https://%s/search?%s' % (domain, args)
return params
+
def parse_url(url_string):
"""remove yahoo-specific tracking-url"""
@@ -121,6 +121,7 @@ def parse_url(url_string):
end = min(endpositions)
return unquote(url_string[start:end])
+
def response(resp):
"""parse response"""
@@ -140,18 +141,12 @@ def response(resp):
offset = len(extract_text(title.xpath('span')))
title = extract_text(title)[offset:]
- content = eval_xpath_getindex(
- result, './/div[contains(@class, "compText")]', 0, default=''
- )
+ content = eval_xpath_getindex(result, './/div[contains(@class, "compText")]', 0, default='')
if content:
content = extract_text(content)
# append result
- results.append({
- 'url': url,
- 'title': title,
- 'content': content
- })
+ results.append({'url': url, 'title': title, 'content': content})
for suggestion in eval_xpath_list(dom, '//div[contains(@class, "AlsoTry")]//table//a'):
# append suggestion
@@ -167,6 +162,6 @@ def _fetch_supported_languages(resp):
offset = len('lang_')
for val in eval_xpath_list(dom, '//div[contains(@class, "lang-item")]/input/@value'):
- supported_languages.append( val[offset:] )
+ supported_languages.append(val[offset:])
return supported_languages
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index ec07cd408..06f090f74 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -39,36 +39,31 @@ paging = True
categories = ['news']
# search-url
-search_url = (
- 'https://news.search.yahoo.com/search'
- '?{query}&b={offset}'
- )
+search_url = 'https://news.search.yahoo.com/search' '?{query}&b={offset}'
AGO_RE = re.compile(r'([0-9]+)\s*(year|month|week|day|minute|hour)')
AGO_TIMEDELTA = {
- 'minute': timedelta(minutes=1),
- 'hour': timedelta(hours=1),
- 'day': timedelta(days=1),
- 'week': timedelta(days=7),
- 'month': timedelta(days=30),
- 'year': timedelta(days=365),
+ 'minute': timedelta(minutes=1),
+ 'hour': timedelta(hours=1),
+ 'day': timedelta(days=1),
+ 'week': timedelta(days=7),
+ 'month': timedelta(days=30),
+ 'year': timedelta(days=365),
}
+
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
- params['url'] = search_url.format(
- offset = offset,
- query = urlencode({'p': query})
- )
+ params['url'] = search_url.format(offset=offset, query=urlencode({'p': query}))
logger.debug("query_url --> %s", params['url'])
return params
+
def response(resp):
results = []
dom = html.fromstring(resp.text)
-
# parse results
for result in eval_xpath_list(dom, '//ol[contains(@class,"searchCenterMiddle")]//li'):
@@ -80,12 +75,7 @@ def response(resp):
content = extract_text(result.xpath('.//p'))
img_src = eval_xpath_getindex(result, './/img/@data-src', 0, None)
- item = {
- 'url': url,
- 'title': title,
- 'content': content,
- 'img_src' : img_src
- }
+ item = {'url': url, 'title': title, 'content': content, 'img_src': img_src}
pub_date = extract_text(result.xpath('.//span[contains(@class,"s-time")]'))
ago = AGO_RE.search(pub_date)
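AGO_RE and AGO_TIMEDELTA exist to turn relative strings such as "3 weeks ago" into an absolute publishedDate. The engine's own post-processing is outside this hunk; a self-contained sketch of the conversion:
import re
from datetime import datetime, timedelta

AGO_RE = re.compile(r'([0-9]+)\s*(year|month|week|day|minute|hour)')
AGO_TIMEDELTA = {
    'minute': timedelta(minutes=1),
    'hour': timedelta(hours=1),
    'day': timedelta(days=1),
    'week': timedelta(days=7),
    'month': timedelta(days=30),
    'year': timedelta(days=365),
}

def parse_ago(pub_date):
    # e.g. "3 weeks ago" -> datetime roughly 21 days in the past
    ago = AGO_RE.search(pub_date)
    if not ago:
        return None
    number, unit = int(ago.group(1)), ago.group(2)
    return datetime.now() - number * AGO_TIMEDELTA[unit]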
diff --git a/searx/engines/youtube_api.py b/searx/engines/youtube_api.py
index ed27db07b..52db45960 100644
--- a/searx/engines/youtube_api.py
+++ b/searx/engines/youtube_api.py
@@ -27,17 +27,18 @@ api_key = None
base_url = 'https://www.googleapis.com/youtube/v3/search'
search_url = base_url + '?part=snippet&{query}&maxResults=20&key={api_key}'
-embedded_url = '<iframe width="540" height="304" ' +\
- 'data-src="https://www.youtube-nocookie.com/embed/{videoid}" ' +\
- 'frameborder="0" allowfullscreen></iframe>'
+embedded_url = (
+ '<iframe width="540" height="304" '
+ + 'data-src="https://www.youtube-nocookie.com/embed/{videoid}" '
+ + 'frameborder="0" allowfullscreen></iframe>'
+)
base_youtube_url = 'https://www.youtube.com/watch?v='
# do search-request
def request(query, params):
- params['url'] = search_url.format(query=urlencode({'q': query}),
- api_key=api_key)
+ params['url'] = search_url.format(query=urlencode({'q': query}), api_key=api_key)
# add language tag if specified
if params['language'] != 'all':
@@ -79,13 +80,17 @@ def response(resp):
embedded = embedded_url.format(videoid=videoid)
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'template': 'videos.html',
- 'publishedDate': publishedDate,
- 'embedded': embedded,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'template': 'videos.html',
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
+ 'thumbnail': thumbnail,
+ }
+ )
# return results
return results
diff --git a/searx/engines/youtube_noapi.py b/searx/engines/youtube_noapi.py
index 68b75bc72..239830cc7 100644
--- a/searx/engines/youtube_noapi.py
+++ b/searx/engines/youtube_noapi.py
@@ -30,14 +30,13 @@ search_url = base_url + '?search_query={query}&page={page}'
time_range_url = '&sp=EgII{time_range}%253D%253D'
# the key seems to be constant
next_page_url = 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
-time_range_dict = {'day': 'Ag',
- 'week': 'Aw',
- 'month': 'BA',
- 'year': 'BQ'}
+time_range_dict = {'day': 'Ag', 'week': 'Aw', 'month': 'BA', 'year': 'BQ'}
-embedded_url = '<iframe width="540" height="304" ' +\
- 'data-src="https://www.youtube-nocookie.com/embed/{videoid}" ' +\
- 'frameborder="0" allowfullscreen></iframe>'
+embedded_url = (
+ '<iframe width="540" height="304" '
+ + 'data-src="https://www.youtube-nocookie.com/embed/{videoid}" '
+ + 'frameborder="0" allowfullscreen></iframe>'
+)
base_youtube_url = 'https://www.youtube.com/watch?v='
@@ -51,10 +50,12 @@ def request(query, params):
else:
params['url'] = next_page_url
params['method'] = 'POST'
- params['data'] = dumps({
- 'context': {"client": {"clientName": "WEB", "clientVersion": "2.20210310.12.01"}},
- 'continuation': params['engine_data']['next_page_token'],
- })
+ params['data'] = dumps(
+ {
+ 'context': {"client": {"clientName": "WEB", "clientVersion": "2.20210310.12.01"}},
+ 'continuation': params['engine_data']['next_page_token'],
+ }
+ )
params['headers']['Content-Type'] = 'application/json'
params['headers']['Cookie'] = "CONSENT=YES+cb.%s-17-p0.en+F+941;" % datetime.now().strftime("%Y%m%d")
@@ -71,34 +72,42 @@ def response(resp):
def parse_next_page_response(response_text):
results = []
result_json = loads(response_text)
- for section in (result_json['onResponseReceivedCommands'][0]
- .get('appendContinuationItemsAction')['continuationItems'][0]
- .get('itemSectionRenderer')['contents']):
+ for section in (
+ result_json['onResponseReceivedCommands'][0]
+ .get('appendContinuationItemsAction')['continuationItems'][0]
+ .get('itemSectionRenderer')['contents']
+ ):
if 'videoRenderer' not in section:
continue
section = section['videoRenderer']
content = "-"
if 'descriptionSnippet' in section:
content = ' '.join(x['text'] for x in section['descriptionSnippet']['runs'])
- results.append({
- 'url': base_youtube_url + section['videoId'],
- 'title': ' '.join(x['text'] for x in section['title']['runs']),
- 'content': content,
- 'author': section['ownerText']['runs'][0]['text'],
- 'length': section['lengthText']['simpleText'],
- 'template': 'videos.html',
- 'embedded': embedded_url.format(videoid=section['videoId']),
- 'thumbnail': section['thumbnail']['thumbnails'][-1]['url'],
- })
+ results.append(
+ {
+ 'url': base_youtube_url + section['videoId'],
+ 'title': ' '.join(x['text'] for x in section['title']['runs']),
+ 'content': content,
+ 'author': section['ownerText']['runs'][0]['text'],
+ 'length': section['lengthText']['simpleText'],
+ 'template': 'videos.html',
+ 'embedded': embedded_url.format(videoid=section['videoId']),
+ 'thumbnail': section['thumbnail']['thumbnails'][-1]['url'],
+ }
+ )
try:
- token = result_json['onResponseReceivedCommands'][0]\
- .get('appendContinuationItemsAction')['continuationItems'][1]\
- .get('continuationItemRenderer')['continuationEndpoint']\
+ token = (
+ result_json['onResponseReceivedCommands'][0]
+ .get('appendContinuationItemsAction')['continuationItems'][1]
+ .get('continuationItemRenderer')['continuationEndpoint']
.get('continuationCommand')['token']
- results.append({
- "engine_data": token,
- "key": "next_page_token",
- })
+ )
+ results.append(
+ {
+ "engine_data": token,
+ "key": "next_page_token",
+ }
+ )
except:
pass
@@ -107,26 +116,32 @@ def parse_next_page_response(response_text):
def parse_first_page_response(response_text):
results = []
- results_data = response_text[response_text.find('ytInitialData'):]
- results_data = results_data[results_data.find('{'):results_data.find(';</script>')]
+ results_data = response_text[response_text.find('ytInitialData') :]
+ results_data = results_data[results_data.find('{') : results_data.find(';</script>')]
results_json = loads(results_data) if results_data else {}
- sections = results_json.get('contents', {})\
- .get('twoColumnSearchResultsRenderer', {})\
- .get('primaryContents', {})\
- .get('sectionListRenderer', {})\
- .get('contents', [])
+ sections = (
+ results_json.get('contents', {})
+ .get('twoColumnSearchResultsRenderer', {})
+ .get('primaryContents', {})
+ .get('sectionListRenderer', {})
+ .get('contents', [])
+ )
for section in sections:
if "continuationItemRenderer" in section:
- next_page_token = section["continuationItemRenderer"]\
- .get("continuationEndpoint", {})\
- .get("continuationCommand", {})\
+ next_page_token = (
+ section["continuationItemRenderer"]
+ .get("continuationEndpoint", {})
+ .get("continuationCommand", {})
.get("token", "")
+ )
if next_page_token:
- results.append({
- "engine_data": next_page_token,
- "key": "next_page_token",
- })
+ results.append(
+ {
+ "engine_data": next_page_token,
+ "key": "next_page_token",
+ }
+ )
for video_container in section.get('itemSectionRenderer', {}).get('contents', []):
video = video_container.get('videoRenderer', {})
videoid = video.get('videoId')
@@ -140,14 +155,18 @@ def parse_first_page_response(response_text):
length = get_text_from_json(video.get('lengthText', {}))
# append result
- results.append({'url': url,
- 'title': title,
- 'content': content,
- 'author': author,
- 'length': length,
- 'template': 'videos.html',
- 'embedded': embedded,
- 'thumbnail': thumbnail})
+ results.append(
+ {
+ 'url': url,
+ 'title': title,
+ 'content': content,
+ 'author': author,
+ 'length': length,
+ 'template': 'videos.html',
+ 'embedded': embedded,
+ 'thumbnail': thumbnail,
+ }
+ )
# return results
return results
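parse_first_page_response above works by slicing the ytInitialData JSON blob out of the raw page HTML: everything between 'ytInitialData' and ';</script>'. A toy, self-contained version of just that extraction step:
from json import loads

def extract_yt_initial_data(response_text):
    # keep the text from the 'ytInitialData' marker onwards, then take the
    # JSON object between the first '{' and the closing ';</script>'
    results_data = response_text[response_text.find('ytInitialData'):]
    results_data = results_data[results_data.find('{'):results_data.find(';</script>')]
    return loads(results_data) if results_data else {}

# extract_yt_initial_data('var ytInitialData = {"contents": {}};</script>')
#   ->  {'contents': {}}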
diff --git a/searx/engines/zlibrary.py b/searx/engines/zlibrary.py
index 180e9e355..81d93ac84 100644
--- a/searx/engines/zlibrary.py
+++ b/searx/engines/zlibrary.py
@@ -31,25 +31,23 @@ categories = ['files']
paging = True
base_url = ''
+
def init(engine_settings=None):
- global base_url # pylint: disable=global-statement
+ global base_url # pylint: disable=global-statement
if "base_url" not in engine_settings:
resp = http_get('https://z-lib.org', timeout=5.0)
if resp.ok:
dom = html.fromstring(resp.text)
- base_url = "https:" + extract_text(eval_xpath(dom,
- './/a[contains(@class, "domain-check-link") and @data-mode="books"]/@href'
- ))
+ base_url = "https:" + extract_text(
+ eval_xpath(dom, './/a[contains(@class, "domain-check-link") and @data-mode="books"]/@href')
+ )
logger.debug("using base_url: %s" % base_url)
def request(query, params):
search_url = base_url + '/s/{search_query}/?page={pageno}'
- params['url'] = search_url.format(
- search_query=quote(query),
- pageno=params['pageno']
- )
+ params['url'] = search_url.format(search_query=quote(query), pageno=params['pageno'])
return params
@@ -60,36 +58,34 @@ def response(resp):
for item in dom.xpath('//div[@id="searchResultBox"]//div[contains(@class, "resItemBox")]'):
result = {}
- result["url"] = base_url + \
- item.xpath('(.//a[starts-with(@href, "/book/")])[1]/@href')[0]
+ result["url"] = base_url + item.xpath('(.//a[starts-with(@href, "/book/")])[1]/@href')[0]
result["title"] = extract_text(eval_xpath(item, './/*[@itemprop="name"]'))
- year = extract_text(eval_xpath(
- item, './/div[contains(@class, "property_year")]//div[contains(@class, "property_value")]'))
+ year = extract_text(
+ eval_xpath(item, './/div[contains(@class, "property_year")]//div[contains(@class, "property_value")]')
+ )
if year:
year = '(%s) ' % year
- result["content"] = "{year}{authors}. {publisher}. Language: {language}. {file_type}. \
+ result[
+ "content"
+ ] = "{year}{authors}. {publisher}. Language: {language}. {file_type}. \
Book rating: {book_rating}, book quality: {book_quality}".format(
- year = year,
- authors = extract_text(eval_xpath(item, './/div[@class="authors"]')),
- publisher = extract_text(eval_xpath(item, './/div[@title="Publisher"]')),
- file_type = extract_text(
- eval_xpath(
- item,
- './/div[contains(@class, "property__file")]//div[contains(@class, "property_value")]')),
- language = extract_text(
- eval_xpath(
- item,
- './/div[contains(@class, "property_language")]//div[contains(@class, "property_value")]')),
- book_rating = extract_text(
- eval_xpath(
- item, './/span[contains(@class, "book-rating-interest-score")]')),
- book_quality = extract_text(
- eval_xpath(
- item, './/span[contains(@class, "book-rating-quality-score")]')),
- )
+ year=year,
+ authors=extract_text(eval_xpath(item, './/div[@class="authors"]')),
+ publisher=extract_text(eval_xpath(item, './/div[@title="Publisher"]')),
+ file_type=extract_text(
+ eval_xpath(item, './/div[contains(@class, "property__file")]//div[contains(@class, "property_value")]')
+ ),
+ language=extract_text(
+ eval_xpath(
+ item, './/div[contains(@class, "property_language")]//div[contains(@class, "property_value")]'
+ )
+ ),
+ book_rating=extract_text(eval_xpath(item, './/span[contains(@class, "book-rating-interest-score")]')),
+ book_quality=extract_text(eval_xpath(item, './/span[contains(@class, "book-rating-quality-score")]')),
+ )
result["img_src"] = extract_text(eval_xpath(item, './/img[contains(@class, "cover")]/@data-src'))