diff options
author | Marc Abonce Seguin <marc-abonce@mailbox.org> | 2018-02-28 22:30:48 -0600 |
---|---|---|
committer | Marc Abonce Seguin <marc-abonce@mailbox.org> | 2018-03-27 00:08:03 -0600 |
commit | 772c048d01c7585fd60afca1ce30a1914e6e5b4a (patch) | |
tree | 96a5662897df2bcf0ab53456e0a67ace998f2169 /searx/engines | |
parent | d1eae9359f8c5920632a730744ea2208070f06da (diff) | |
download | searxng-772c048d01c7585fd60afca1ce30a1914e6e5b4a.tar.gz searxng-772c048d01c7585fd60afca1ce30a1914e6e5b4a.zip |
refactor engine's search language handling
Add match_language function in utils to match any user-given
language code with a list of engine's supported languages.
Also add language_aliases dict on each engine to translate
standard language codes into the custom codes used by the engine.
Diffstat (limited to 'searx/engines')
-rw-r--r-- | searx/engines/__init__.py | 21 | ||||
-rw-r--r-- | searx/engines/archlinux.py | 4 | ||||
-rw-r--r-- | searx/engines/bing.py | 6 | ||||
-rw-r--r-- | searx/engines/bing_images.py | 23 | ||||
-rw-r--r-- | searx/engines/bing_news.py | 6 | ||||
-rw-r--r-- | searx/engines/bing_videos.py | 7 | ||||
-rw-r--r-- | searx/engines/dailymotion.py | 3 | ||||
-rw-r--r-- | searx/engines/duckduckgo.py | 47 | ||||
-rw-r--r-- | searx/engines/duckduckgo_definitions.py | 7 | ||||
-rw-r--r-- | searx/engines/duckduckgo_images.py | 5 | ||||
-rw-r--r-- | searx/engines/google.py | 29 | ||||
-rw-r--r-- | searx/engines/google_news.py | 6 | ||||
-rw-r--r-- | searx/engines/qwant.py | 13 | ||||
-rw-r--r-- | searx/engines/swisscows.py | 8 | ||||
-rw-r--r-- | searx/engines/wikidata.py | 5 | ||||
-rw-r--r-- | searx/engines/wikipedia.py | 9 | ||||
-rw-r--r-- | searx/engines/yahoo.py | 24 | ||||
-rw-r--r-- | searx/engines/yahoo_news.py | 7 |
18 files changed, 109 insertions, 121 deletions
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py index af3cf8110..c2f9f3da4 100644 --- a/searx/engines/__init__.py +++ b/searx/engines/__init__.py @@ -20,13 +20,14 @@ import sys import threading from os.path import realpath, dirname from io import open +from babel.localedata import locale_identifiers from flask_babel import gettext from operator import itemgetter from json import loads from requests import get from searx import settings from searx import logger -from searx.utils import load_module +from searx.utils import load_module, match_language logger = logger.getChild('engines') @@ -38,6 +39,8 @@ engines = {} categories = {'general': []} languages = loads(open(engine_dir + '/../data/engines_languages.json', 'r', encoding='utf-8').read()) +babel_langs = [lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0] + for lang_parts in (lang_code.split('_') for lang_code in locale_identifiers())] engine_shortcuts = {} engine_default_args = {'paging': False, @@ -97,6 +100,22 @@ def load_engine(engine_data): if engine_data['name'] in languages: setattr(engine, 'supported_languages', languages[engine_data['name']]) + # find custom aliases for non standard language codes + if hasattr(engine, 'supported_languages'): + if hasattr(engine, 'language_aliases'): + language_aliases = getattr(engine, 'language_aliases') + else: + language_aliases = {} + + for engine_lang in getattr(engine, 'supported_languages'): + iso_lang = match_language(engine_lang, babel_langs, fallback=None) + if iso_lang and iso_lang != engine_lang and not engine_lang.startswith(iso_lang) and \ + iso_lang not in getattr(engine, 'supported_languages'): + language_aliases[iso_lang] = engine_lang + + if language_aliases: + setattr(engine, 'language_aliases', language_aliases) + # assign language fetching method if auxiliary method exists if hasattr(engine, '_fetch_supported_languages'): setattr(engine, 'fetch_supported_languages', diff --git 
a/searx/engines/archlinux.py b/searx/engines/archlinux.py index 245bc50b2..fc08112af 100644 --- a/searx/engines/archlinux.py +++ b/searx/engines/archlinux.py @@ -99,13 +99,13 @@ supported_languages = dict(lang_urls, **main_langs) # do search-request def request(query, params): - # translate the locale (e.g. 'en_US') to language code ('en') + # translate the locale (e.g. 'en-US') to language code ('en') language = locale_to_lang_code(params['language']) # if our language is hosted on the main site, we need to add its name # to the query in order to narrow the results to that language if language in main_langs: - query += '(' + main_langs[language] + ')' + query += b' (' + main_langs[language] + b')' # prepare the request parameters query = urlencode({'search': query}) diff --git a/searx/engines/bing.py b/searx/engines/bing.py index 2e58d0293..c6d41782b 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -16,12 +16,14 @@ from lxml import html from searx.engines.xpath import extract_text from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = ['general'] paging = True language_support = True supported_languages_url = 'https://www.bing.com/account/general' +language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'} # search-url base_url = 'https://www.bing.com/' @@ -32,9 +34,9 @@ search_string = 'search?{query}&first={offset}' def request(query, params): offset = (params['pageno'] - 1) * 10 + 1 - lang = params['language'].split('-')[0].upper() + lang = match_language(params['language'], supported_languages, language_aliases) - query = u'language:{} {}'.format(lang, query.decode('utf-8')).encode('utf-8') + query = u'language:{} {}'.format(lang.split('-')[0].upper(), query.decode('utf-8')).encode('utf-8') search_path = search_string.format( query=urlencode({'q': query}), diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py index 15679056c..66e14c01f 100644 --- 
a/searx/engines/bing_images.py +++ b/searx/engines/bing_images.py @@ -19,6 +19,7 @@ from lxml import html from json import loads import re from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = ['images'] @@ -46,26 +47,6 @@ safesearch_types = {2: 'STRICT', _quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U) -# get supported region code -def get_region_code(lang, lang_list=None): - region = None - if lang in (lang_list or supported_languages): - region = lang - elif lang.startswith('no'): - region = 'nb-NO' - else: - # try to get a supported country code with language - lang = lang.split('-')[0] - for lc in (lang_list or supported_languages): - if lang == lc.split('-')[0]: - region = lc - break - if region: - return region.lower() - else: - return 'en-us' - - # do search-request def request(query, params): offset = (params['pageno'] - 1) * 10 + 1 @@ -74,7 +55,7 @@ def request(query, params): query=urlencode({'q': query}), offset=offset) - language = get_region_code(params['language']) + language = match_language(params['language'], supported_languages).lower() params['cookies']['SRCHHPGUSR'] = \ 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE') diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py index c609a1949..39048a1fc 100644 --- a/searx/engines/bing_news.py +++ b/searx/engines/bing_news.py @@ -14,8 +14,8 @@ from datetime import datetime from dateutil import parser from lxml import etree -from searx.utils import list_get -from searx.engines.bing import _fetch_supported_languages, supported_languages_url +from searx.utils import list_get, match_language +from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases from searx.url_utils import urlencode, urlparse, parse_qsl # engine dependent config @@ -71,7 +71,7 @@ def request(query, params): offset = (params['pageno'] - 1) * 10 + 1 - language = params['language'] 
+ language = match_language(params['language'], supported_languages, language_aliases) params['url'] = _get_url(query, language, offset, params['time_range']) diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py index 312a82ba1..7002ac861 100644 --- a/searx/engines/bing_videos.py +++ b/searx/engines/bing_videos.py @@ -12,9 +12,10 @@ from json import loads from lxml import html -from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url, get_region_code +from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url from searx.engines.xpath import extract_text from searx.url_utils import urlencode +from searx.utils import match_language categories = ['videos'] @@ -47,8 +48,8 @@ def request(query, params): 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE') # language cookie - region = get_region_code(params['language'], lang_list=supported_languages) - params['cookies']['_EDGE_S'] = 'mkt=' + region + '&F=1' + language = match_language(params['language'], supported_languages).lower() + params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1' # query and paging params['url'] = search_url.format(query=urlencode({'q': query}), diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py index cfa76796d..8268b6257 100644 --- a/searx/engines/dailymotion.py +++ b/searx/engines/dailymotion.py @@ -15,6 +15,7 @@ from json import loads from datetime import datetime from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = ['videos'] @@ -32,7 +33,7 @@ supported_languages_url = 'https://api.dailymotion.com/languages' # do search-request def request(query, params): - locale = params['language'] + locale = match_language(params['language'], supported_languages) params['url'] = search_url.format( query=urlencode({'search': query, 'localization': locale}), diff --git a/searx/engines/duckduckgo.py 
b/searx/engines/duckduckgo.py index 02ccff8e5..2c5dc50d8 100644 --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -18,6 +18,7 @@ from json import loads from searx.engines.xpath import extract_text from searx.poolrequests import get from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = ['general'] @@ -26,6 +27,16 @@ language_support = True supported_languages_url = 'https://duckduckgo.com/util/u172.js' time_range_support = True +language_aliases = { + 'ar-SA': 'ar-XA', + 'es-419': 'es-XL', + 'ja': 'jp-JP', + 'ko': 'kr-KR', + 'sl-SI': 'sl-SL', + 'zh-TW': 'tzh-TW', + 'zh-HK': 'tzh-HK' +} + # search-url url = 'https://duckduckgo.com/html?{query}&s={offset}&dc={dc_param}' time_range_url = '&df={range}' @@ -42,34 +53,12 @@ content_xpath = './/a[@class="result__snippet"]' # match query's language to a region code that duckduckgo will accept -def get_region_code(lang, lang_list=None): - # custom fixes for languages - if lang[:2] == 'ja': - region_code = 'jp-jp' - elif lang[:2] == 'sl': - region_code = 'sl-sl' - elif lang == 'zh-TW': - region_code = 'tw-tzh' - elif lang == 'zh-HK': - region_code = 'hk-tzh' - elif lang[-2:] == 'SA': - region_code = 'xa-' + lang.split('-')[0] - elif lang[-2:] == 'GB': - region_code = 'uk-' + lang.split('-')[0] - else: - region_code = lang.split('-') - if len(region_code) == 2: - # country code goes first - region_code = region_code[1].lower() + '-' + region_code[0].lower() - else: - # tries to get a country code from language - region_code = region_code[0].lower() - for lc in (lang_list or supported_languages): - lc = lc.split('-') - if region_code == lc[0]: - region_code = lc[1].lower() + '-' + lc[0].lower() - break - return region_code +def get_region_code(lang, lang_list=[]): + lang_code = match_language(lang, lang_list, language_aliases, 'wt-WT') + lang_parts = lang_code.split('-') + + # country code goes first + return lang_parts[1].lower() + '-' + 
lang_parts[0].lower() # do search-request @@ -79,7 +68,7 @@ def request(query, params): offset = (params['pageno'] - 1) * 30 - region_code = get_region_code(params['language']) + region_code = get_region_code(params['language'], supported_languages) params['url'] = url.format( query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset) diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py index 21c6a6578..957a13ea6 100644 --- a/searx/engines/duckduckgo_definitions.py +++ b/searx/engines/duckduckgo_definitions.py @@ -2,9 +2,9 @@ import json from lxml import html from re import compile from searx.engines.xpath import extract_text -from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url +from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases from searx.url_utils import urlencode -from searx.utils import html_to_text +from searx.utils import html_to_text, match_language url = 'https://api.duckduckgo.com/'\ + '?{query}&format=json&pretty=0&no_redirect=1&d=1' @@ -24,7 +24,8 @@ def result_to_text(url, text, htmlResult): def request(query, params): params['url'] = url.format(query=urlencode({'q': query})) - params['headers']['Accept-Language'] = params['language'].split('-')[0] + language = match_language(params['language'], supported_languages, language_aliases) + params['headers']['Accept-Language'] = language.split('-')[0] return params diff --git a/searx/engines/duckduckgo_images.py b/searx/engines/duckduckgo_images.py index 7b0e72694..7905d0bcd 100644 --- a/searx/engines/duckduckgo_images.py +++ b/searx/engines/duckduckgo_images.py @@ -15,7 +15,10 @@ from json import loads from searx.engines.xpath import extract_text -from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, get_region_code +from searx.engines.duckduckgo import ( + _fetch_supported_languages, supported_languages_url, + 
get_region_code, language_aliases +) from searx.poolrequests import get from searx.url_utils import urlencode diff --git a/searx/engines/google.py b/searx/engines/google.py index 99c0d2b45..93075e2dc 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -14,6 +14,7 @@ from lxml import html, etree from searx.engines.xpath import extract_text, extract_url from searx import logger from searx.url_utils import urlencode, urlparse, parse_qsl +from searx.utils import match_language logger = logger.getChild('google engine') @@ -165,22 +166,20 @@ def extract_text_from_dom(result, xpath): def request(query, params): offset = (params['pageno'] - 1) * 10 + language = match_language(params['language'], supported_languages) + language_array = language.split('-') + if params['language'].find('-') > 0: + country = params['language'].split('-')[1] + elif len(language_array) == 2: + country = language_array[1] + else: + country = 'US' + # temporary fix until a way of supporting en-US is found - if params['language'] == 'en-US': - params['language'] = 'en-GB' + if language == 'en-US': + country = 'GB' - if params['language'][:2] == 'jv': - language = 'jw' - country = 'ID' - url_lang = 'lang_jw' - else: - language_array = params['language'].lower().split('-') - if len(language_array) == 2: - country = language_array[1] - else: - country = 'US' - language = language_array[0] + ',' + language_array[0] + '-' + country - url_lang = 'lang_' + language_array[0] + url_lang = 'lang_' + language if use_locale_domain: google_hostname = country_to_hostname.get(country.upper(), default_hostname) @@ -196,7 +195,7 @@ def request(query, params): if params['time_range'] in time_range_dict: params['url'] += time_range_search.format(range=time_range_dict[params['time_range']]) - params['headers']['Accept-Language'] = language + params['headers']['Accept-Language'] = language + ',' + language + '-' + country params['headers']['Accept'] = 
'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' params['google_hostname'] = google_hostname diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py index 8b8e7175d..41abf0a01 100644 --- a/searx/engines/google_news.py +++ b/searx/engines/google_news.py @@ -13,6 +13,7 @@ from lxml import html from searx.engines.google import _fetch_supported_languages, supported_languages_url from searx.url_utils import urlencode +from searx.utils import match_language # search-url categories = ['news'] @@ -50,8 +51,9 @@ def request(query, params): params['url'] = search_url.format(query=urlencode({'q': query}), search_options=urlencode(search_options)) - language_array = params['language'].lower().split('-') - params['url'] += '&lr=lang_' + language_array[0] + language = match_language(params['language'], supported_languages).split('-')[0] + if language: + params['url'] += '&lr=lang_' + language return params diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py index 408c2b3de..239193b96 100644 --- a/searx/engines/qwant.py +++ b/searx/engines/qwant.py @@ -14,6 +14,7 @@ from datetime import datetime from json import loads from searx.utils import html_to_text from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = None @@ -45,16 +46,8 @@ def request(query, params): offset=offset) # add language tag - if params['language'] == 'no' or params['language'].startswith('no-'): - params['language'] = params['language'].replace('no', 'nb', 1) - if params['language'].find('-') < 0: - # tries to get a country code from language - for lang in supported_languages: - lc = lang.split('-') - if params['language'] == lc[0]: - params['language'] = lang - break - params['url'] += '&locale=' + params['language'].replace('-', '_').lower() + language = match_language(params['language'], supported_languages) + params['url'] += '&locale=' + language.replace('-', '_').lower() return params diff --git 
a/searx/engines/swisscows.py b/searx/engines/swisscows.py index 45e9d87a9..ff4df24b7 100644 --- a/searx/engines/swisscows.py +++ b/searx/engines/swisscows.py @@ -14,6 +14,7 @@ from json import loads import re from lxml.html import fromstring from searx.url_utils import unquote, urlencode +from searx.utils import match_language # engine dependent config categories = ['general', 'images'] @@ -35,11 +36,8 @@ regex_img_url_remove_start = re.compile(b'^https?://i\.swisscows\.ch/\?link=') # do search-request def request(query, params): - if params['language'].split('-')[0] == 'no': - region = 'nb-NO' - else: - region = params['language'] - ui_language = params['language'].split('-')[0] + region = match_language(params['language'], supported_languages) + ui_language = region.split('-')[0] search_path = search_string.format( query=urlencode({'query': query, 'uiLanguage': ui_language, 'region': region}), diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py index 1f31a1f88..1fdbc9869 100644 --- a/searx/engines/wikidata.py +++ b/searx/engines/wikidata.py @@ -16,6 +16,7 @@ from searx.poolrequests import get from searx.engines.xpath import extract_text from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url from searx.url_utils import urlencode +from searx.utils import match_language from json import loads from lxml.html import fromstring @@ -56,7 +57,7 @@ calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]' def request(query, params): - language = params['language'].split('-')[0] + language = match_language(params['language'], supported_languages).split('-')[0] params['url'] = url_search.format( query=urlencode({'label': query, 'language': language})) @@ -68,7 +69,7 @@ def response(resp): html = fromstring(resp.text) wikidata_ids = html.xpath(wikidata_ids_xpath) - language = resp.search_params['language'].split('-')[0] + language = match_language(resp.search_params['language'], supported_languages).split('-')[0] # 
TODO: make requests asynchronous to avoid timeout when result_count > 1 for wikidata_id in wikidata_ids[:result_count]: diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py index fe82f5115..6cd17e378 100644 --- a/searx/engines/wikipedia.py +++ b/searx/engines/wikipedia.py @@ -13,6 +13,7 @@ from json import loads from lxml.html import fromstring from searx.url_utils import quote, urlencode +from searx.utils import match_language # search-url base_url = u'https://{language}.wikipedia.org/' @@ -30,13 +31,7 @@ supported_languages_url = 'https://meta.wikimedia.org/wiki/List_of_Wikipedias' # set language in base_url def url_lang(lang): - lang = lang.split('-')[0] - if lang not in supported_languages: - language = 'en' - else: - language = lang - - return language + return match_language(lang, supported_languages).split('-')[0] # do search-request diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py index 626a398b5..ba4cb6af8 100644 --- a/searx/engines/yahoo.py +++ b/searx/engines/yahoo.py @@ -14,6 +14,7 @@ from lxml import html from searx.engines.xpath import extract_text, extract_url from searx.url_utils import unquote, urlencode +from searx.utils import match_language # engine dependent config categories = ['general'] @@ -39,6 +40,8 @@ time_range_dict = {'day': ['1d', 'd'], 'week': ['1w', 'w'], 'month': ['1m', 'm']} +language_aliases = {'zh-CN': 'zh-CHS', 'zh-TW': 'zh-CHT', 'zh-HK': 'zh-CHT'} + # remove yahoo-specific tracking-url def parse_url(url_string): @@ -70,23 +73,16 @@ def _get_url(query, offset, language, time_range): lang=language) -def _get_language(params): - if params['language'][:2] == 'zh': - if params['language'] == 'zh' or params['language'] == 'zh-CH': - return 'szh' - else: - return 'tzh' - else: - return params['language'].split('-')[0] - - # do search-request def request(query, params): if params['time_range'] and params['time_range'] not in time_range_dict: return params offset = (params['pageno'] - 1) * 10 + 1 - language = 
_get_language(params) + language = match_language(params['language'], supported_languages, language_aliases) + if language not in language_aliases.values(): + language = language.split('-')[0] + language = language.replace('-', '_').lower() params['url'] = _get_url(query, offset, language, params['time_range']) @@ -145,7 +141,11 @@ def _fetch_supported_languages(resp): dom = html.fromstring(resp.text) options = dom.xpath('//div[@id="yschlang"]/span/label/input') for option in options: - code = option.xpath('./@value')[0][5:].replace('_', '-') + code_parts = option.xpath('./@value')[0][5:].split('_') + if len(code_parts) == 2: + code = code_parts[0] + '-' + code_parts[1].upper() + else: + code = code_parts[0] supported_languages.append(code) return supported_languages diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py index 69e9aef4f..b61384d06 100644 --- a/searx/engines/yahoo_news.py +++ b/searx/engines/yahoo_news.py @@ -13,9 +13,12 @@ import re from datetime import datetime, timedelta from lxml import html from searx.engines.xpath import extract_text, extract_url -from searx.engines.yahoo import parse_url, _fetch_supported_languages, supported_languages_url +from searx.engines.yahoo import ( + parse_url, _fetch_supported_languages, supported_languages_url, language_aliases +) from dateutil import parser from searx.url_utils import urlencode +from searx.utils import match_language # engine dependent config categories = ['news'] @@ -38,7 +41,7 @@ suggestion_xpath = '//div[contains(@class,"VerALSOTRY")]//a' def request(query, params): offset = (params['pageno'] - 1) * 10 + 1 - language = params['language'].split('-')[0] + language = match_language(params['language'], supported_languages, language_aliases).split('-')[0] params['url'] = search_url.format(offset=offset, query=urlencode({'p': query}), |