From 2292e6e130dca104cb324197b63611a012e4ef3c Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:28:47 +0100
Subject: [fix] handle missing result size

---
 searx/engines/bing.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'searx/engines')

diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index ed0b87dbd..24776c400 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -63,6 +63,8 @@ def response(resp):
     results = []
     result_len = 0

+
+
     dom = html.fromstring(resp.text)
     # parse results
     for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
@@ -89,8 +91,7 @@
                        'content': content})

     try:
-        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
-        result_len_container = utils.to_string(result_len_container)
+        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
         if "-" in result_len_container:
             # Remove the part "from-to" for paginated request ...
             result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
@@ -102,7 +103,7 @@
         logger.debug('result error :\n%s', e)
         pass

-    if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+    if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
         return []

     results.append({'number_of_results': result_len})
--
cgit v1.2.3-54-g00ecf

From 2dc2e1e8f9c8ae0d28df56f42b2f4949d8611624 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:29:10 +0100
Subject: [fix] skip invalid encoded attributes

---
 searx/engines/flickr_noapi.py | 26 +++++++++++++++++---------
 1 file changed, 17 insertions(+), 9 deletions(-)

(limited to 'searx/engines')

diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
index 198ac2cff..e1abb378f 100644
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -109,14 +109,22 @@ def response(resp):
         else:
             url = build_flickr_url(photo['ownerNsid'], photo['id'])

-        results.append({'url': url,
-                        'title': title,
-                        'img_src': img_src,
-                        'thumbnail_src': thumbnail_src,
-                        'content': content,
-                        'author': author,
-                        'source': source,
-                        'img_format': img_format,
-                        'template': 'images.html'})
+        result = {
+            'url': url,
+            'img_src': img_src,
+            'thumbnail_src': thumbnail_src,
+            'source': source,
+            'img_format': img_format,
+            'template': 'images.html'
+        }
+        try:
+            result['author'] = author.encode('utf-8')
+            result['title'] = title.encode('utf-8')
+            result['content'] = content.encode('utf-8')
+        except:
+            result['author'] = ''
+            result['title'] = ''
+            result['content'] = ''
+        results.append(result)

     return results
--
cgit v1.2.3-54-g00ecf

From 86a378bd0109684bd45c917f94068e3c98441904 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:29:28 +0100
Subject: [fix] handle missing thumbnail

---
 searx/engines/ina.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'searx/engines')

diff --git a/searx/engines/ina.py b/searx/engines/ina.py
index 37a05f099..ea509649f 100644
--- a/searx/engines/ina.py
+++ b/searx/engines/ina.py
@@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr'
 search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'

 # specific xpath variables
-results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
+results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
 url_xpath = './/a/@href'
 title_xpath = './/h3[@class="h3--title media-heading"]'
 thumbnail_xpath = './/img/@src'
@@ -65,8 +65,11 @@ def response(resp):
         videoid = result.xpath(url_xpath)[0]
         url = base_url + videoid
         title = p.unescape(extract_text(result.xpath(title_xpath)))
-        thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
-        if thumbnail[0] == '/':
+        try:
+            thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
+        except:
+            thumbnail = ''
+        if thumbnail and thumbnail[0] == '/':
             thumbnail = base_url + thumbnail
         d = extract_text(result.xpath(publishedDate_xpath)[0])
         d = d.split('/')
--
cgit v1.2.3-54-g00ecf

From 1e6253ce16346fc6f439a07211b56770d06ba225 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:29:55 +0100
Subject: [fix] handle empty response

---
 searx/engines/microsoft_academic.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'searx/engines')

diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py
index 9387b08d0..9bac0069c 100644
--- a/searx/engines/microsoft_academic.py
+++ b/searx/engines/microsoft_academic.py
@@ -45,6 +45,8 @@ def request(query, params):
 def response(resp):
     results = []
     response_data = loads(resp.text)
+    if not response_data:
+        return results

     for result in response_data['results']:
         url = _get_url(result)
--
cgit v1.2.3-54-g00ecf

From ad5bb994b1cff56c4f021f88bfa62f38055f1416 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:30:18 +0100
Subject: [fix] add py3 compatibility

---
 searx/engines/scanr_structures.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'searx/engines')

diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py
index 72fd2b3c9..7208dcb70 100644
--- a/searx/engines/scanr_structures.py
+++ b/searx/engines/scanr_structures.py
@@ -29,7 +29,7 @@ def request(query, params):
     params['url'] = search_url
     params['method'] = 'POST'
     params['headers']['Content-type'] = "application/json"
-    params['data'] = dumps({"query": query,
+    params['data'] = dumps({"query": query.decode('utf-8'),
                            "searchField": "ALL",
                            "sortDirection": "ASC",
                            "sortOrder": "RELEVANCY",
--
cgit v1.2.3-54-g00ecf

From 17b6faa4c3c1cf14a327f4a3538fc70dce08b756 Mon Sep 17 00:00:00 2001
From: Adam Tauber
Date: Thu, 2 Jan 2020 22:37:06 +0100
Subject: [fix] pep8

---
 searx/engines/bing.py | 2 --
 1 file changed, 2 deletions(-)

(limited to 'searx/engines')

diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 24776c400..b193f7c60 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -63,8 +63,6 @@ def response(resp):
     results = []
     result_len = 0

-
-
     dom = html.fromstring(resp.text)
     # parse results
     for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
--
cgit v1.2.3-54-g00ecf