diff options
author | Grant Lanham <contact@grantlanham.com> | 2024-08-19 23:02:06 -0400 |
---|---|---|
committer | Markus Heiser <markus.heiser@darmarIT.de> | 2024-08-21 08:41:53 +0200 |
commit | 5276219b9d790baeeb505813bb76d0dffa1d2d51 (patch) | |
tree | 8b70c30e493d4a7454ca642b5d98cf347a4a1f42 /searx/engines/tineye.py | |
parent | 5be55e3309761842e070f48580a519499cfc8ceb (diff) | |
download | searxng-5276219b9d790baeeb505813bb76d0dffa1d2d51.tar.gz searxng-5276219b9d790baeeb505813bb76d0dffa1d2d51.zip |
Fix tineye engine url, datetime parsing, and minor refactor
Changes made to tineye engine:
1. Importing logging if TYPE_CHECKING is enabled
2. Remove unnecessary try-catch around json parsing the response, as this
masked the original error and had no immediate benefit
3. Improve error handling explicitly for status code 422 and 400
upfront, deferring json_parsing only for these status codes and
successful status codes
4. Unit test all new applicable changes to ensure compatibility
Diffstat (limited to 'searx/engines/tineye.py')
-rw-r--r-- | searx/engines/tineye.py | 57 |
1 file changed, 28 insertions, 29 deletions
diff --git a/searx/engines/tineye.py b/searx/engines/tineye.py index 196c89a2b..c35799c69 100644 --- a/searx/engines/tineye.py +++ b/searx/engines/tineye.py @@ -14,10 +14,16 @@ billion images `[tineye.com] <https://tineye.com/how>`_. """ +from typing import TYPE_CHECKING from urllib.parse import urlencode from datetime import datetime from flask_babel import gettext +if TYPE_CHECKING: + import logging + + logger = logging.getLogger() + about = { "website": 'https://tineye.com', "wikidata_id": 'Q2382535', @@ -34,7 +40,7 @@ categories = ['general'] paging = True safesearch = False base_url = 'https://tineye.com' -search_string = '/result_json/?page={page}&{query}' +search_string = '/api/v1/result_json/?page={page}&{query}' FORMAT_NOT_SUPPORTED = gettext( "Could not read that image url. This may be due to an unsupported file" @@ -120,7 +126,7 @@ def parse_tineye_match(match_json): crawl_date = backlink_json.get("crawl_date") if crawl_date: - crawl_date = datetime.fromisoformat(crawl_date[:-3]) + crawl_date = datetime.strptime(crawl_date, '%Y-%m-%d') else: crawl_date = datetime.min @@ -150,29 +156,15 @@ def parse_tineye_match(match_json): def response(resp): """Parse HTTP response from TinEye.""" - results = [] - try: + # handle the 422 client side errors, and the possible 400 status code error + if resp.status_code in (400, 422): json_data = resp.json() - except Exception as exc: # pylint: disable=broad-except - msg = "can't parse JSON response // %s" % exc - logger.error(msg) - json_data = {'error': msg} - - # handle error codes from Tineye - - if resp.is_error: - if resp.status_code in (400, 422): - - message = 'HTTP status: %s' % resp.status_code - error = json_data.get('error') - s_key = json_data.get('suggestions', {}).get('key', '') - - if error and s_key: - message = "%s (%s)" % (error, s_key) - elif error: - message = error + suggestions = json_data.get('suggestions', {}) + message = f'HTTP Status Code: {resp.status_code}' + if resp.status_code == 422: + s_key 
= suggestions.get('key', '') if s_key == "Invalid image URL": # test https://docs.searxng.org/_static/searxng-wordmark.svg message = FORMAT_NOT_SUPPORTED @@ -182,16 +174,23 @@ def response(resp): elif s_key == 'Download Error': # test https://notexists message = DOWNLOAD_ERROR + else: + logger.warning("Unknown suggestion key encountered: %s", s_key) + else: # 400 + description = suggestions.get('description') + if isinstance(description, list): + message = ','.join(description) - # see https://github.com/searxng/searxng/pull/1456#issuecomment-1193105023 - # results.append({'answer': message}) - logger.error(message) - - return results + # see https://github.com/searxng/searxng/pull/1456#issuecomment-1193105023 + # results.append({'answer': message}) + logger.error(message) + return [] - resp.raise_for_status() + # Raise for all other responses + resp.raise_for_status() - # append results from matches + results = [] + json_data = resp.json() for match_json in json_data['matches']: |