author    Adam Tauber <asciimoo@gmail.com>    2018-08-19 13:22:22 +0200
committer GitHub <noreply@github.com>    2018-08-19 13:22:22 +0200
commit    b75f1b6cc39a94989a74d52eb0f1267c3e3c665e (patch)
tree      d3bab81ca2071196b1b4223d6d2db7d408b79bf2 /searx/engines
parent    e7f7eda18cc69287f30c512a98b4e90453bcd8e7 (diff)
parent    931c1bb0f663bc13998f5a78ae7cd9485d37453c (diff)
Merge branch 'master' into patch-2
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/__init__.py                |  21
-rw-r--r--  searx/engines/acgsou.py                  |  75
-rw-r--r--  searx/engines/archlinux.py               |   4
-rwxr-xr-x  searx/engines/base.py                    |   2
-rw-r--r--  searx/engines/bing.py                    |   9
-rw-r--r--  searx/engines/bing_images.py             |  23
-rw-r--r--  searx/engines/bing_news.py               |   6
-rw-r--r--  searx/engines/bing_videos.py             |   7
-rw-r--r--  searx/engines/currency_convert.py        |  14
-rw-r--r--  searx/engines/dailymotion.py             |   3
-rw-r--r--  searx/engines/duckduckgo.py              |  51
-rw-r--r--  searx/engines/duckduckgo_definitions.py  |   7
-rw-r--r--  searx/engines/duckduckgo_images.py       |   5
-rw-r--r--  searx/engines/findx.py                   | 115
-rw-r--r--  searx/engines/gigablast.py               |   2
-rw-r--r--  searx/engines/google.py                  |  34
-rw-r--r--  searx/engines/google_images.py           |  41
-rw-r--r--  searx/engines/google_news.py             |  10
-rw-r--r--  searx/engines/nyaa.py                    |   4
-rw-r--r--  searx/engines/piratebay.py               |   2
-rw-r--r--  searx/engines/qwant.py                   |  15
-rw-r--r--  searx/engines/swisscows.py               |   8
-rw-r--r--  searx/engines/wikidata.py                |  19
-rw-r--r--  searx/engines/wikipedia.py               |   9
-rw-r--r--  searx/engines/xpath.py                   |   2
-rw-r--r--  searx/engines/yahoo.py                   |  24
-rw-r--r--  searx/engines/yahoo_news.py              |   7
27 files changed, 343 insertions(+), 176 deletions(-)
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index af3cf8110..c2f9f3da4 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -20,13 +20,14 @@ import sys
import threading
from os.path import realpath, dirname
from io import open
+from babel.localedata import locale_identifiers
from flask_babel import gettext
from operator import itemgetter
from json import loads
from requests import get
from searx import settings
from searx import logger
-from searx.utils import load_module
+from searx.utils import load_module, match_language
logger = logger.getChild('engines')
@@ -38,6 +39,8 @@ engines = {}
categories = {'general': []}
languages = loads(open(engine_dir + '/../data/engines_languages.json', 'r', encoding='utf-8').read())
+babel_langs = [lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0]
+ for lang_parts in (lang_code.split('_') for lang_code in locale_identifiers())]
engine_shortcuts = {}
engine_default_args = {'paging': False,
@@ -97,6 +100,22 @@ def load_engine(engine_data):
if engine_data['name'] in languages:
setattr(engine, 'supported_languages', languages[engine_data['name']])
+ # find custom aliases for non standard language codes
+ if hasattr(engine, 'supported_languages'):
+ if hasattr(engine, 'language_aliases'):
+ language_aliases = getattr(engine, 'language_aliases')
+ else:
+ language_aliases = {}
+
+ for engine_lang in getattr(engine, 'supported_languages'):
+ iso_lang = match_language(engine_lang, babel_langs, fallback=None)
+ if iso_lang and iso_lang != engine_lang and not engine_lang.startswith(iso_lang) and \
+ iso_lang not in getattr(engine, 'supported_languages'):
+ language_aliases[iso_lang] = engine_lang
+
+ if language_aliases:
+ setattr(engine, 'language_aliases', language_aliases)
+
# assign language fetching method if auxiliary method exists
if hasattr(engine, '_fetch_supported_languages'):
setattr(engine, 'fetch_supported_languages',
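
For reference, the babel_langs comprehension added above flattens Babel's underscore-separated locale identifiers into two-part hyphenated tags by keeping only the first and last subtags. A minimal standalone sketch of that transformation (illustrative only, not part of the commit):

    def to_babel_lang(lang_code):
        # 'zh_Hant_TW' -> ['zh', 'Hant', 'TW'] -> 'zh-TW'; bare 'fr' passes through
        lang_parts = lang_code.split('_')
        return lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0]

    print(to_babel_lang('zh_Hant_TW'))  # zh-TW
    print(to_babel_lang('en_US'))       # en-US
    print(to_babel_lang('fr'))          # fr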
diff --git a/searx/engines/acgsou.py b/searx/engines/acgsou.py
new file mode 100644
index 000000000..cca28f0db
--- /dev/null
+++ b/searx/engines/acgsou.py
@@ -0,0 +1,75 @@
+"""
+ Acgsou (Japanese Animation/Music/Comics Bittorrent tracker)
+
+ @website https://www.acgsou.com/
+ @provide-api no
+ @using-api no
+ @results HTML
+ @stable no (HTML can change)
+ @parse url, title, content, filesize, magnetlink
+"""
+
+from lxml import html
+from searx.engines.xpath import extract_text
+from searx.url_utils import urlencode
+from searx.utils import get_torrent_size, int_or_zero
+
+# engine dependent config
+categories = ['files', 'images', 'videos', 'music']
+paging = True
+
+# search-url
+base_url = 'http://www.acgsou.com/'
+search_url = base_url + 'search.php?{query}&page={offset}'
+# xpath queries
+xpath_results = '//table[contains(@class, "list_style table_fixed")]//tr[not(th)]'
+xpath_category = './/td[2]/a[1]'
+xpath_title = './/td[3]/a[last()]'
+xpath_torrent_links = './/td[3]/a'
+xpath_filesize = './/td[4]/text()'
+
+
+def request(query, params):
+ query = urlencode({'keyword': query})
+ params['url'] = search_url.format(query=query, offset=params['pageno'])
+ return params
+
+
+def response(resp):
+ results = []
+ dom = html.fromstring(resp.text)
+ for result in dom.xpath(xpath_results):
+ # defaults
+ filesize = 0
+ magnet_link = "magnet:?xt=urn:btih:{}&tr=http://tracker.acgsou.com:2710/announce"
+ torrent_link = ""
+
+ try:
+ category = extract_text(result.xpath(xpath_category)[0])
+ except IndexError:
+ category = ''
+
+ page_a = result.xpath(xpath_title)[0]
+ title = extract_text(page_a)
+ href = base_url + page_a.attrib.get('href')
+
+ magnet_link = magnet_link.format(page_a.attrib.get('href')[5:-5])
+
+ try:
+ filesize_info = result.xpath(xpath_filesize)[0]
+ filesize = filesize_info[:-2]
+ filesize_multiplier = filesize_info[-2:]
+ filesize = get_torrent_size(filesize, filesize_multiplier)
+ except:
+ pass
+ # seed/leech/download counts are omitted: the site appears to generate them randomly on every request
+ content = u'Category: "{category}".'
+ content = content.format(category=category)
+
+ results.append({'url': href,
+ 'title': title,
+ 'content': content,
+ 'filesize': filesize,
+ 'magnetlink': magnet_link,
+ 'template': 'torrent.html'})
+ return results
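
The filesize handling above splits a size string such as '120.5MB' into value and unit before handing both to searx.utils.get_torrent_size. A standalone approximation of that conversion, assuming the helper maps unit suffixes to binary byte multipliers (a sketch, not the actual helper):

    def torrent_size_bytes(filesize, filesize_multiplier):
        # approximation: '120.5', 'MB' -> 126353408 bytes; None on bad input
        multipliers = {'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
        try:
            return int(float(filesize) * multipliers.get(filesize_multiplier.upper(), 1))
        except ValueError:
            return None

    print(torrent_size_bytes('120.5', 'MB'))  # 126353408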
diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py
index 245bc50b2..fc08112af 100644
--- a/searx/engines/archlinux.py
+++ b/searx/engines/archlinux.py
@@ -99,13 +99,13 @@ supported_languages = dict(lang_urls, **main_langs)
# do search-request
def request(query, params):
- # translate the locale (e.g. 'en_US') to language code ('en')
+ # translate the locale (e.g. 'en-US') to language code ('en')
language = locale_to_lang_code(params['language'])
# if our language is hosted on the main site, we need to add its name
# to the query in order to narrow the results to that language
if language in main_langs:
- query += '(' + main_langs[language] + ')'
+ query += b' (' + main_langs[language] + b')'
# prepare the request parameters
query = urlencode({'search': query})
diff --git a/searx/engines/base.py b/searx/engines/base.py
index be0b7d247..f1b1cf671 100755
--- a/searx/engines/base.py
+++ b/searx/engines/base.py
@@ -55,7 +55,7 @@ shorcut_dict = {
def request(query, params):
# replace shortcuts with API advanced search keywords
for key in shorcut_dict.keys():
- query = re.sub(str(key), str(shorcut_dict[key]), query)
+ query = re.sub(key, shorcut_dict[key], str(query))
# basic search
offset = (params['pageno'] - 1) * number_of_results
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 2e58d0293..2da40619d 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -16,12 +16,14 @@
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
+from searx.utils import match_language, gen_useragent
# engine dependent config
categories = ['general']
paging = True
language_support = True
supported_languages_url = 'https://www.bing.com/account/general'
+language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}
# search-url
base_url = 'https://www.bing.com/'
@@ -32,15 +34,18 @@ search_string = 'search?{query}&first={offset}'
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
- lang = params['language'].split('-')[0].upper()
+ lang = match_language(params['language'], supported_languages, language_aliases)
- query = u'language:{} {}'.format(lang, query.decode('utf-8')).encode('utf-8')
+ query = u'language:{} {}'.format(lang.split('-')[0].upper(), query.decode('utf-8')).encode('utf-8')
search_path = search_string.format(
query=urlencode({'q': query}),
offset=offset)
params['url'] = base_url + search_path
+
+ params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64')
+
return params
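
Several engines in this merge gain a language_aliases dict that maps standard codes to engine-specific ones (here zh-CN -> zh-CHS and zh-TW/zh-HK -> zh-CHT). A simplified sketch of the alias-aware lookup that searx.utils.match_language is expected to perform (illustrative, not the actual implementation):

    def match_language_sketch(locale, lang_list, aliases=None, fallback='en-US'):
        # resolve a user locale against an engine's supported codes,
        # consulting engine-specific aliases first
        aliases = aliases or {}
        if locale in aliases:
            return aliases[locale]
        if locale in lang_list:
            return locale
        # fall back to a bare-language match, e.g. 'de-AT' -> 'de-DE'
        base = locale.split('-')[0]
        for code in lang_list:
            if code == base or code.split('-')[0] == base:
                return code
        return fallback

    aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}
    print(match_language_sketch('zh-HK', ['en-US', 'zh-CHS', 'zh-CHT'], aliases))  # zh-CHT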
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py
index 15679056c..66e14c01f 100644
--- a/searx/engines/bing_images.py
+++ b/searx/engines/bing_images.py
@@ -19,6 +19,7 @@ from lxml import html
from json import loads
import re
from searx.url_utils import urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['images']
@@ -46,26 +47,6 @@ safesearch_types = {2: 'STRICT',
_quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U)
-# get supported region code
-def get_region_code(lang, lang_list=None):
- region = None
- if lang in (lang_list or supported_languages):
- region = lang
- elif lang.startswith('no'):
- region = 'nb-NO'
- else:
- # try to get a supported country code with language
- lang = lang.split('-')[0]
- for lc in (lang_list or supported_languages):
- if lang == lc.split('-')[0]:
- region = lc
- break
- if region:
- return region.lower()
- else:
- return 'en-us'
-
-
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
@@ -74,7 +55,7 @@ def request(query, params):
query=urlencode({'q': query}),
offset=offset)
- language = get_region_code(params['language'])
+ language = match_language(params['language'], supported_languages).lower()
params['cookies']['SRCHHPGUSR'] = \
'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index c609a1949..39048a1fc 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -14,8 +14,8 @@
from datetime import datetime
from dateutil import parser
from lxml import etree
-from searx.utils import list_get
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url
+from searx.utils import list_get, match_language
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.url_utils import urlencode, urlparse, parse_qsl
# engine dependent config
@@ -71,7 +71,7 @@ def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
- language = params['language']
+ language = match_language(params['language'], supported_languages, language_aliases)
params['url'] = _get_url(query, language, offset, params['time_range'])
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
index 312a82ba1..7002ac861 100644
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -12,9 +12,10 @@
from json import loads
from lxml import html
-from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code
+from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
+from searx.utils import match_language
categories = ['videos']
@@ -47,8 +48,8 @@ def request(query, params):
'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
# language cookie
- region = get_region_code(params['language'], lang_list=supported_languages)
- params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1'
+ language = match_language(params['language'], supported_languages).lower()
+ params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1'
# query and paging
params['url'] = search_url.format(query=urlencode({'q': query}),
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index 9c1c2f7b3..8eab8f673 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -11,7 +11,7 @@ if sys.version_info[0] == 3:
unicode = str
categories = []
-url = 'https://finance.google.com/finance/converter?a=1&from={0}&to={1}'
+url = 'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'
weight = 100
parser_re = re.compile(b'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I)
@@ -44,7 +44,6 @@ def request(query, params):
if not m:
# wrong query
return params
-
amount, from_currency, to_currency = m.groups()
amount = float(amount)
from_currency = name_to_iso4217(from_currency.strip())
@@ -63,16 +62,13 @@ def request(query, params):
def response(resp):
+ """remove first and last lines to get only json"""
+ json_resp = resp.text[resp.text.find('\n') + 1:resp.text.rfind('\n') - 2]
results = []
- pat = '<span class=bld>(.+) {0}</span>'.format(
- resp.search_params['to'].upper())
-
try:
- conversion_rate = re.findall(pat, resp.text)[0]
- conversion_rate = float(conversion_rate)
+ conversion_rate = float(json.loads(json_resp)['conversion']['converted-amount'])
except:
return results
-
answer = '{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'.format(
resp.search_params['amount'],
resp.search_params['from'],
@@ -83,7 +79,7 @@ def response(resp):
resp.search_params['to_name'],
)
- url = 'https://finance.google.com/finance?q={0}{1}'.format(
+ url = 'https://duckduckgo.com/js/spice/currency/1/{0}/{1}'.format(
resp.search_params['from'].upper(), resp.search_params['to'])
results.append({'answer': answer, 'url': url})
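
The old Google Finance scraper is replaced by DuckDuckGo's currency spice, which answers with a JSONP-style wrapper rather than bare JSON; the new response() slices that wrapper off before parsing. A sketch of the trimming, under the assumption that the payload looks like ddg_spice_currency( {json} ); on separate lines:

    import json

    # assumed response shape: function wrapper on its own lines around the JSON body
    resp_text = 'ddg_spice_currency(\n{"conversion": {"converted-amount": "0.88"}}\n);\n'

    # drop the first line and the trailing ");" to keep only the JSON
    json_resp = resp_text[resp_text.find('\n') + 1:resp_text.rfind('\n') - 2]
    print(json.loads(json_resp)['conversion']['converted-amount'])  # 0.88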
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index cfa76796d..8268b6257 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -15,6 +15,7 @@
from json import loads
from datetime import datetime
from searx.url_utils import urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['videos']
@@ -32,7 +33,7 @@ supported_languages_url = 'https://api.dailymotion.com/languages'
# do search-request
def request(query, params):
- locale = params['language']
+ locale = match_language(params['language'], supported_languages)
params['url'] = search_url.format(
query=urlencode({'search': query, 'localization': locale}),
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 6f8797fed..2c5dc50d8 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -18,16 +18,27 @@ from json import loads
from searx.engines.xpath import extract_text
from searx.poolrequests import get
from searx.url_utils import urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['general']
paging = True
language_support = True
-supported_languages_url = 'https://duckduckgo.com/d2030.js'
+supported_languages_url = 'https://duckduckgo.com/util/u172.js'
time_range_support = True
+language_aliases = {
+ 'ar-SA': 'ar-XA',
+ 'es-419': 'es-XL',
+ 'ja': 'jp-JP',
+ 'ko': 'kr-KR',
+ 'sl-SI': 'sl-SL',
+ 'zh-TW': 'tzh-TW',
+ 'zh-HK': 'tzh-HK'
+}
+
# search-url
-url = 'https://duckduckgo.com/html?{query}&s={offset}&api=/d.js&o=json&dc={dc_param}'
+url = 'https://duckduckgo.com/html?{query}&s={offset}&dc={dc_param}'
time_range_url = '&df={range}'
time_range_dict = {'day': 'd',
@@ -42,34 +53,12 @@ content_xpath = './/a[@class="result__snippet"]'
# match query's language to a region code that duckduckgo will accept
-def get_region_code(lang, lang_list=None):
- # custom fixes for languages
- if lang[:2] == 'ja':
- region_code = 'jp-jp'
- elif lang[:2] == 'sl':
- region_code = 'sl-sl'
- elif lang == 'zh-TW':
- region_code = 'tw-tzh'
- elif lang == 'zh-HK':
- region_code = 'hk-tzh'
- elif lang[-2:] == 'SA':
- region_code = 'xa-' + lang.split('-')[0]
- elif lang[-2:] == 'GB':
- region_code = 'uk-' + lang.split('-')[0]
- else:
- region_code = lang.split('-')
- if len(region_code) == 2:
- # country code goes first
- region_code = region_code[1].lower() + '-' + region_code[0].lower()
- else:
- # tries to get a country code from language
- region_code = region_code[0].lower()
- for lc in (lang_list or supported_languages):
- lc = lc.split('-')
- if region_code == lc[0]:
- region_code = lc[1].lower() + '-' + lc[0].lower()
- break
- return region_code
+def get_region_code(lang, lang_list=[]):
+ lang_code = match_language(lang, lang_list, language_aliases, 'wt-WT')
+ lang_parts = lang_code.split('-')
+
+ # country code goes first
+ return lang_parts[1].lower() + '-' + lang_parts[0].lower()
# do search-request
@@ -79,7 +68,7 @@ def request(query, params):
offset = (params['pageno'] - 1) * 30
- region_code = get_region_code(params['language'])
+ region_code = get_region_code(params['language'], supported_languages)
params['url'] = url.format(
query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset)
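
DuckDuckGo's kl parameter wants the country subtag first, so the slimmed-down get_region_code simply flips whatever match_language returns. With the alias table above, Traditional Chinese for Taiwan resolves as follows (worked example):

    # 'zh-TW' --alias--> 'tzh-TW' --split and flip--> 'tw-tzh'
    lang_code = 'tzh-TW'  # what match_language would return for 'zh-TW' here
    lang_parts = lang_code.split('-')
    print(lang_parts[1].lower() + '-' + lang_parts[0].lower())  # tw-tzh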
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 21c6a6578..957a13ea6 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -2,9 +2,9 @@ import json
from lxml import html
from re import compile
from searx.engines.xpath import extract_text
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.url_utils import urlencode
-from searx.utils import html_to_text
+from searx.utils import html_to_text, match_language
url = 'https://api.duckduckgo.com/'\
+ '?{query}&format=json&pretty=0&no_redirect=1&d=1'
@@ -24,7 +24,8 @@ def result_to_text(url, text, htmlResult):
def request(query, params):
params['url'] = url.format(query=urlencode({'q': query}))
- params['headers']['Accept-Language'] = params['language'].split('-')[0]
+ language = match_language(params['language'], supported_languages, language_aliases)
+ params['headers']['Accept-Language'] = language.split('-')[0]
return params
diff --git a/searx/engines/duckduckgo_images.py b/searx/engines/duckduckgo_images.py
index 7b0e72694..7905d0bcd 100644
--- a/searx/engines/duckduckgo_images.py
+++ b/searx/engines/duckduckgo_images.py
@@ -15,7 +15,10 @@
from json import loads
from searx.engines.xpath import extract_text
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, get_region_code
+from searx.engines.duckduckgo import (
+ _fetch_supported_languages, supported_languages_url,
+ get_region_code, language_aliases
+)
from searx.poolrequests import get
from searx.url_utils import urlencode
diff --git a/searx/engines/findx.py b/searx/engines/findx.py
new file mode 100644
index 000000000..87c9d503c
--- /dev/null
+++ b/searx/engines/findx.py
@@ -0,0 +1,115 @@
+"""
+FindX (General, Images, Videos)
+
+@website https://www.findx.com
+@provide-api no
+@using-api no
+@results HTML
+@stable no
+@parse url, title, content, embedded, img_src, thumbnail_src
+"""
+
+from dateutil import parser
+from json import loads
+import re
+
+from lxml import html
+
+from searx import logger
+from searx.engines.xpath import extract_text
+from searx.engines.youtube_noapi import base_youtube_url, embedded_url
+from searx.url_utils import urlencode
+
+
+paging = True
+results_xpath = '//script[@id="initial-state"]'
+search_url = 'https://www.findx.com/{category}?{q}'
+type_map = {
+ 'none': 'web',
+ 'general': 'web',
+ 'images': 'images',
+ 'videos': 'videos',
+}
+
+
+def request(query, params):
+ params['url'] = search_url.format(
+ category=type_map[params['category']],
+ q=urlencode({
+ 'q': query,
+ 'page': params['pageno']
+ })
+ )
+ return params
+
+
+def response(resp):
+ dom = html.fromstring(resp.text)
+ results_raw_json = dom.xpath(results_xpath)
+ results_json = loads(extract_text(results_raw_json))
+
+ if len(results_json['web']['results']) > 0:
+ return _general_results(results_json['web']['results']['webSearch']['results'])
+
+ if len(results_json['images']['results']) > 0:
+ return _images_results(results_json['images']['results'])
+
+ if len(results_json['video']['results']) > 0:
+ return _videos_results(results_json['video']['results'])
+
+ return []
+
+
+def _general_results(general_results):
+ results = []
+ for result in general_results:
+ results.append({
+ 'url': result['url'],
+ 'title': result['title'],
+ 'content': result['sum'],
+ })
+ return results
+
+
+def _images_results(image_results):
+ results = []
+ for result in image_results:
+ results.append({
+ 'url': result['sourceURL'],
+ 'title': result['title'],
+ 'content': result['source'],
+ 'thumbnail_src': _extract_url(result['assets']['thumb']['url']),
+ 'img_src': _extract_url(result['assets']['file']['url']),
+ 'template': 'images.html',
+ })
+ return results
+
+
+def _videos_results(video_results):
+ results = []
+ for result in video_results:
+ if not result['kind'].startswith('youtube'):
+ logger.warn('Unknown video kind in findx: {}'.format(result['kind']))
+ continue
+
+ description = result['snippet']['description']
+ if len(description) > 300:
+ description = description[:300] + '...'
+
+ results.append({
+ 'url': base_youtube_url + result['id'],
+ 'title': result['snippet']['title'],
+ 'content': description,
+ 'thumbnail': _extract_url(result['snippet']['thumbnails']['default']['url']),
+ 'publishedDate': parser.parse(result['snippet']['publishedAt']),
+ 'embedded': embedded_url.format(videoid=result['id']),
+ 'template': 'videos.html',
+ })
+ return results
+
+
+def _extract_url(url):
+ matching = re.search('(/https?://[^)]+)', url)
+ if matching:
+ return matching.group(0)[1:]
+ return ''
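
findx serves image assets behind a proxy path that embeds the original URL in parentheses; _extract_url recovers the inner URL with a regex and strips the leading slash. A quick check of that behavior (the proxied URL below is made up):

    import re

    def _extract_url(url):
        matching = re.search('(/https?://[^)]+)', url)
        if matching:
            return matching.group(0)[1:]
        return ''

    print(_extract_url('/proxy/250x160/(/https://example.com/cat.jpg)'))
    # https://example.com/cat.jpg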
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index ed9380ec0..9b9b9bd77 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -34,6 +34,7 @@ search_string = 'search?{query}'\
'&qlang={lang}'\
'&ff={safesearch}'\
'&rxiec={rxieu}'\
+ '&ulse={ulse}'\
'&rand={rxikd}' # current unix timestamp
# specific xpath variables
@@ -64,6 +65,7 @@ def request(query, params):
number_of_results=number_of_results,
rxikd=int(time() * 1000),
rxieu=random.randint(1000000000, 9999999999),
+ ulse=random.randint(100000000, 999999999),
lang=language,
safesearch=safesearch)
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 0a8678362..62e7d1170 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -14,6 +14,7 @@ from lxml import html, etree
from searx.engines.xpath import extract_text, extract_url
from searx import logger
from searx.url_utils import urlencode, urlparse, parse_qsl
+from searx.utils import match_language
logger = logger.getChild('google engine')
@@ -72,7 +73,7 @@ country_to_hostname = {
'RO': 'www.google.ro', # Romania
'RU': 'www.google.ru', # Russia
'SK': 'www.google.sk', # Slovakia
- 'SL': 'www.google.si', # Slovenia (SL -> si)
+ 'SI': 'www.google.si', # Slovenia
'SE': 'www.google.se', # Sweden
'TH': 'www.google.co.th', # Thailand
'TR': 'www.google.com.tr', # Turkey
@@ -90,7 +91,7 @@ url_map = 'https://www.openstreetmap.org/'\
search_path = '/search'
search_url = ('https://{hostname}' +
search_path +
- '?{query}&start={offset}&gws_rd=cr&gbv=1&lr={lang}&ei=x')
+ '?{query}&start={offset}&gws_rd=cr&gbv=1&lr={lang}&hl={lang_short}&ei=x')
time_range_search = "&tbs=qdr:{range}"
time_range_dict = {'day': 'd',
@@ -165,22 +166,16 @@ def extract_text_from_dom(result, xpath):
def request(query, params):
offset = (params['pageno'] - 1) * 10
- # temporary fix until a way of supporting en-US is found
- if params['language'] == 'en-US':
- params['language'] = 'en-GB'
-
- if params['language'][:2] == 'jv':
- language = 'jw'
- country = 'ID'
- url_lang = 'lang_jw'
+ language = match_language(params['language'], supported_languages)
+ language_array = language.split('-')
+ if params['language'].find('-') > 0:
+ country = params['language'].split('-')[1]
+ elif len(language_array) == 2:
+ country = language_array[1]
else:
- language_array = params['language'].lower().split('-')
- if len(language_array) == 2:
- country = language_array[1]
- else:
- country = 'US'
- language = language_array[0] + ',' + language_array[0] + '-' + country
- url_lang = 'lang_' + language_array[0]
+ country = 'US'
+
+ url_lang = 'lang_' + language
if use_locale_domain:
google_hostname = country_to_hostname.get(country.upper(), default_hostname)
@@ -192,11 +187,12 @@ def request(query, params):
params['url'] = search_url.format(offset=offset,
query=urlencode({'q': query}),
hostname=google_hostname,
- lang=url_lang)
+ lang=url_lang,
+ lang_short=language)
if params['time_range'] in time_range_dict:
params['url'] += time_range_search.format(range=time_range_dict[params['time_range']])
- params['headers']['Accept-Language'] = language
+ params['headers']['Accept-Language'] = language + ',' + language + '-' + country
params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
params['google_hostname'] = google_hostname
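
The rewritten request() derives the country from the user's locale (defaulting to 'US'), picks the matching Google domain, and advertises both the bare language and the regional variant in Accept-Language. Worked through for a 'de-AT' user, assuming 'de' is the matched language and www.google.at is in country_to_hostname:

    country_to_hostname = {'AT': 'www.google.at'}  # excerpt, assumed entry
    language, country = 'de', 'AT'
    google_hostname = country_to_hostname.get(country.upper(), 'www.google.com')
    accept_language = language + ',' + language + '-' + country
    print(google_hostname, accept_language)  # www.google.at de,de-AT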
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index a380170ca..504831a10 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -13,7 +13,7 @@
from datetime import date, timedelta
from json import loads
from lxml import html
-from searx.url_utils import urlencode
+from searx.url_utils import urlencode, urlparse, parse_qs
# engine dependent config
@@ -25,10 +25,9 @@ number_of_results = 100
search_url = 'https://www.google.com/search'\
'?{query}'\
- '&asearch=ichunk'\
- '&async=_id:rg_s,_pms:s'\
'&tbm=isch'\
- '&yv=2'\
+ '&gbv=1'\
+ '&sa=G'\
'&{search_options}'
time_range_attr = "qdr:{range}"
time_range_custom_attr = "cdr:1,cd_min:{start},cd_max{end}"
@@ -66,30 +65,22 @@ def request(query, params):
def response(resp):
results = []
- g_result = loads(resp.text)
-
- dom = html.fromstring(g_result[1][1])
+ dom = html.fromstring(resp.text)
# parse results
- for result in dom.xpath('//div[@data-ved]'):
-
- try:
- metadata = loads(''.join(result.xpath('./div[contains(@class, "rg_meta")]/text()')))
- except:
- continue
-
- thumbnail_src = metadata['tu']
-
- # http to https
- thumbnail_src = thumbnail_src.replace("http://", "https://")
-
+ for img in dom.xpath('//a'):
+ r = {
+ 'title': u' '.join(img.xpath('.//div[@class="rg_ilmbg"]//text()')),
+ 'content': '',
+ 'template': 'images.html',
+ }
+ url = urlparse(img.xpath('.//@href')[0])
+ query = parse_qs(url.query)
+ r['url'] = query['imgrefurl'][0]
+ r['img_src'] = query['imgurl'][0]
+ r['thumbnail_src'] = r['img_src']
# append result
- results.append({'url': metadata['ru'],
- 'title': metadata['pt'],
- 'content': metadata['s'],
- 'thumbnail_src': thumbnail_src,
- 'img_src': metadata['ou'],
- 'template': 'images.html'})
+ results.append(r)
# return results
return results
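
With the asearch/async JSON endpoint dropped and gbv=1 added, Google returns a basic HTML page whose thumbnails link to /url?... with the real locations in the query string; the rewritten response() digs them out via urlparse and parse_qs. A sketch with a made-up href (the searx.url_utils shim exposes the same names as urllib.parse on Python 3):

    from urllib.parse import urlparse, parse_qs

    href = '/url?imgurl=https://example.com/cat.jpg&imgrefurl=https://example.com/cats'
    query = parse_qs(urlparse(href).query)
    print(query['imgrefurl'][0])  # https://example.com/cats -> result URL
    print(query['imgurl'][0])     # https://example.com/cat.jpg -> img_src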
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 8b8e7175d..aadcb76df 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -13,6 +13,7 @@
from lxml import html
from searx.engines.google import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
+from searx.utils import match_language
# search-url
categories = ['news']
@@ -50,8 +51,9 @@ def request(query, params):
params['url'] = search_url.format(query=urlencode({'q': query}),
search_options=urlencode(search_options))
- language_array = params['language'].lower().split('-')
- params['url'] += '&lr=lang_' + language_array[0]
+ language = match_language(params['language'], supported_languages).split('-')[0]
+ if language:
+ params['url'] += '&lr=lang_' + language
return params
@@ -66,8 +68,8 @@ def response(resp):
for result in dom.xpath('//div[@class="g"]|//div[@class="g _cy"]'):
try:
r = {
- 'url': result.xpath('.//a[@class="l _PMs"]')[0].attrib.get("href"),
- 'title': ''.join(result.xpath('.//a[@class="l _PMs"]//text()')),
+ 'url': result.xpath('.//a[@class="l lLrAF"]')[0].attrib.get("href"),
+ 'title': ''.join(result.xpath('.//a[@class="l lLrAF"]//text()')),
'content': ''.join(result.xpath('.//div[@class="st"]//text()')),
}
except:
diff --git a/searx/engines/nyaa.py b/searx/engines/nyaa.py
index 6a8e598c4..c57979a5f 100644
--- a/searx/engines/nyaa.py
+++ b/searx/engines/nyaa.py
@@ -1,7 +1,7 @@
"""
Nyaa.si (Anime Bittorrent tracker)
- @website http://www.nyaa.si/
+ @website https://nyaa.si/
@provide-api no
@using-api no
@results HTML
@@ -19,7 +19,7 @@ categories = ['files', 'images', 'videos', 'music']
paging = True
# search-url
-base_url = 'http://www.nyaa.si/'
+base_url = 'https://nyaa.si/'
search_url = base_url + '?page=search&{query}&offset={offset}'
# xpath queries
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
index a5af8d824..2f3f22a97 100644
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -18,7 +18,7 @@ categories = ['videos', 'music', 'files']
paging = True
# search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.org/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
# piratebay specific type-definitions
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index 408c2b3de..4b0f1c87c 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -14,6 +14,7 @@ from datetime import datetime
from json import loads
from searx.utils import html_to_text
from searx.url_utils import urlencode
+from searx.utils import match_language
# engine dependent config
categories = None
@@ -27,7 +28,7 @@ category_to_keyword = {'general': 'web',
'social media': 'social'}
# search-url
-url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}'
+url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'
# do search-request
@@ -45,16 +46,8 @@ def request(query, params):
offset=offset)
# add language tag
- if params['language'] == 'no' or params['language'].startswith('no-'):
- params['language'] = params['language'].replace('no', 'nb', 1)
- if params['language'].find('-') < 0:
- # tries to get a country code from language
- for lang in supported_languages:
- lc = lang.split('-')
- if params['language'] == lc[0]:
- params['language'] = lang
- break
- params['url'] += '&locale=' + params['language'].replace('-', '_').lower()
+ language = match_language(params['language'], supported_languages)
+ params['url'] += '&locale=' + language.replace('-', '_').lower()
return params
diff --git a/searx/engines/swisscows.py b/searx/engines/swisscows.py
index 45e9d87a9..ff4df24b7 100644
--- a/searx/engines/swisscows.py
+++ b/searx/engines/swisscows.py
@@ -14,6 +14,7 @@ from json import loads
import re
from lxml.html import fromstring
from searx.url_utils import unquote, urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['general', 'images']
@@ -35,11 +36,8 @@ regex_img_url_remove_start = re.compile(b'^https?://i\.swisscows\.ch/\?link=')
# do search-request
def request(query, params):
- if params['language'].split('-')[0] == 'no':
- region = 'nb-NO'
- else:
- region = params['language']
- ui_language = params['language'].split('-')[0]
+ region = match_language(params['language'], supported_languages)
+ ui_language = region.split('-')[0]
search_path = search_string.format(
query=urlencode({'query': query, 'uiLanguage': ui_language, 'region': region}),
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 1f31a1f88..c315b30da 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -16,6 +16,7 @@ from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
+from searx.utils import match_language
from json import loads
from lxml.html import fromstring
@@ -26,7 +27,7 @@ result_count = 1
# urls
wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
- + '/wiki/Special:ItemDisambiguation?{query}'
+ + '/w/index.php?{query}'
wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
@@ -39,7 +40,7 @@ url_map = 'https://www.openstreetmap.org/'\
url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'
# xpaths
-wikidata_ids_xpath = '//div/ul[@class="wikibase-disambiguation"]/li/a/@title'
+wikidata_ids_xpath = '//ul[@class="mw-search-results"]/li//a/@href'
title_xpath = '//*[contains(@class,"wikibase-title-label")]'
description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
property_xpath = '//div[@id="{propertyid}"]'
@@ -53,25 +54,25 @@ value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
+ '/*/div[contains(@class,"wikibase-snakview-value")]'
language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
+media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a'
def request(query, params):
- language = params['language'].split('-')[0]
-
params['url'] = url_search.format(
- query=urlencode({'label': query, 'language': language}))
+ query=urlencode({'search': query}))
return params
def response(resp):
results = []
html = fromstring(resp.text)
- wikidata_ids = html.xpath(wikidata_ids_xpath)
+ search_results = html.xpath(wikidata_ids_xpath)
- language = resp.search_params['language'].split('-')[0]
+ language = match_language(resp.search_params['language'], supported_languages).split('-')[0]
# TODO: make requests asynchronous to avoid timeout when result_count > 1
- for wikidata_id in wikidata_ids[:result_count]:
+ for search_result in search_results[:result_count]:
+ wikidata_id = search_result.split('/')[-1]
url = url_detail.format(query=urlencode({'page': wikidata_id, 'uselang': language}))
htmlresponse = get(url)
jsonresponse = loads(htmlresponse.text)
@@ -313,7 +314,7 @@ def add_image(result):
for property_id in property_ids:
image = result.xpath(property_xpath.replace('{propertyid}', property_id))
if image:
- image_name = image[0].xpath(value_xpath)
+ image_name = image[0].xpath(media_xpath)
image_src = url_image.replace('{filename}', extract_text(image_name[0]))
return image_src
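
Since Special:ItemDisambiguation is gone, the engine now runs a plain full-text search and takes the item id from each result link's href. In isolation (example href assumed):

    search_result = '/wiki/Q42'  # hypothetical href from the mw-search-results list
    wikidata_id = search_result.split('/')[-1]
    print(wikidata_id)  # Q42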
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index fe82f5115..6cd17e378 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -13,6 +13,7 @@
from json import loads
from lxml.html import fromstring
from searx.url_utils import quote, urlencode
+from searx.utils import match_language
# search-url
base_url = u'https://{language}.wikipedia.org/'
@@ -30,13 +31,7 @@ supported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias'
# set language in base_url
def url_lang(lang):
- lang = lang.split('-')[0]
- if lang not in supported_languages:
- language = 'en'
- else:
- language = lang
-
- return language
+ return match_language(lang, supported_languages).split('-')[0]
# do search-request
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index c8c56da44..50f98d935 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -53,7 +53,7 @@ def extract_url(xpath_results, search_url):
if url.startswith('//'):
# add http or https to this kind of url //example.com/
parsed_search_url = urlparse(search_url)
- url = u'{0}:{1}'.format(parsed_search_url.scheme, url)
+ url = u'{0}:{1}'.format(parsed_search_url.scheme or 'http', url)
elif url.startswith('/'):
# fix relative url to the search engine
url = urljoin(search_url, url)
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 626a398b5..ba4cb6af8 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -14,6 +14,7 @@
from lxml import html
from searx.engines.xpath import extract_text, extract_url
from searx.url_utils import unquote, urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['general']
@@ -39,6 +40,8 @@ time_range_dict = {'day': ['1d', 'd'],
'week': ['1w', 'w'],
'month': ['1m', 'm']}
+language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'}
+
# remove yahoo-specific tracking-url
def parse_url(url_string):
@@ -70,23 +73,16 @@ def _get_url(query, offset, language, time_range):
lang=language)
-def _get_language(params):
- if params['language'][:2] == 'zh':
- if params['language'] == 'zh' or params['language'] == 'zh-CH':
- return 'szh'
- else:
- return 'tzh'
- else:
- return params['language'].split('-')[0]
-
-
# do search-request
def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict:
return params
offset = (params['pageno'] - 1) * 10 + 1
- language = _get_language(params)
+ language = match_language(params['language'], supported_languages, language_aliases)
+ if language not in language_aliases.values():
+ language = language.split('-')[0]
+ language = language.replace('-', '_').lower()
params['url'] = _get_url(query, offset, language, params['time_range'])
@@ -145,7 +141,11 @@ def _fetch_supported_languages(resp):
dom = html.fromstring(resp.text)
options = dom.xpath('//div[@id="yschlang"]/span/label/input')
for option in options:
- code = option.xpath('./@value')[0][5:].replace('_', '-')
+ code_parts = option.xpath('./@value')[0][5:].split('_')
+ if len(code_parts) == 2:
+ code = code_parts[0] + '-' + code_parts[1].upper()
+ else:
+ code = code_parts[0]
supported_languages.append(code)
return supported_languages
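
The fetcher fix normalizes Yahoo's option values (the 'lang_' prefix plus an underscore-separated code) into hyphenated codes with an upper-case region. The same logic in isolation:

    def normalize(option_value):
        # 'lang_pt_BR' -> 'pt-BR'; 'lang_fr' -> 'fr'
        code_parts = option_value[5:].split('_')
        if len(code_parts) == 2:
            return code_parts[0] + '-' + code_parts[1].upper()
        return code_parts[0]

    print(normalize('lang_pt_BR'))  # pt-BR
    print(normalize('lang_fr'))     # fr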
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index 69e9aef4f..b61384d06 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -13,9 +13,12 @@ import re
from datetime import datetime, timedelta
from lxml import html
from searx.engines.xpath import extract_text, extract_url
-from searx.engines.yahoo import parse_url, _fetch_supported_languages, supported_languages_url
+from searx.engines.yahoo import (
+ parse_url, _fetch_supported_languages, supported_languages_url, language_aliases
+)
from dateutil import parser
from searx.url_utils import urlencode
+from searx.utils import match_language
# engine dependent config
categories = ['news']
@@ -38,7 +41,7 @@ suggestion_xpath = '//div[contains(@class,"VerALSOTRY")]//a'
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
- language = params['language'].split('-')[0]
+ language = match_language(params['language'], supported_languages, language_aliases).split('-')[0]
params['url'] = search_url.format(offset=offset,
query=urlencode({'p': query}),