diff options
author | Frank de Lange <github-f@unternet.org> | 2019-07-27 08:22:02 +0200 |
---|---|---|
committer | Alexandre Flament <alex@al-f.net> | 2019-07-27 08:22:02 +0200 |
commit | 11fc9913e9dfe170d07d8d17e0202282e3f92d6f (patch) | |
tree | 445437838139dfa2fdabe9d0c7c52eed10b4575b /searx/engines/bing_images.py | |
parent | f34b5cedb11bcb38eb2ad76cc3ed2b5eb421a75a (diff) | |
download | searxng-11fc9913e9dfe170d07d8d17e0202282e3f92d6f.tar.gz searxng-11fc9913e9dfe170d07d8d17e0202282e3f92d6f.zip |
[enh] bing_images: use data from embedded JSON to improve results (e.g. real page title) (#1568)
use data from embedded JSON to improve results (e.g. real page title), add image format and source info (see PR #1567), improve paging logic (it now works)
Diffstat (limited to 'searx/engines/bing_images.py')
-rw-r--r-- | searx/engines/bing_images.py | 67 |
1 file changed, 33 insertions, 34 deletions
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py index e2495200c..44e2c3bbc 100644 --- a/searx/engines/bing_images.py +++ b/searx/engines/bing_images.py @@ -10,9 +10,6 @@ @stable no (HTML can change) @parse url, title, img_src - @todo currently there are up to 35 images receive per page, - because bing does not parse count=10. - limited response to 10 images """ from lxml import html @@ -28,10 +25,15 @@ safesearch = True time_range_support = True language_support = True supported_languages_url = 'https://www.bing.com/account/general' +number_of_results = 28 # search-url base_url = 'https://www.bing.com/' -search_string = 'images/search?{query}&count=10&first={offset}' +search_string = 'images/search'\ + '?{query}'\ + '&count={count}'\ + '&first={first}'\ + '&FORM=IBASEP' time_range_string = '&qft=+filterui:age-lt{interval}' time_range_dict = {'day': '1440', 'week': '10080', @@ -44,16 +46,14 @@ safesearch_types = {2: 'STRICT', 0: 'OFF'} -_quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U) - - # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 + 1 + offset = ((params['pageno'] - 1) * number_of_results) + 1 search_path = search_string.format( query=urlencode({'q': query}), - offset=offset) + count=number_of_results, + first=offset) language = match_language(params['language'], supported_languages, language_aliases).lower() @@ -77,32 +77,31 @@ def response(resp): dom = html.fromstring(resp.text) # parse results - for result in dom.xpath('//div[@id="mmComponent_images_1"]/ul/li/div/div[@class="imgpt"]'): - link = result.xpath('./a')[0] - - # TODO find actual title - title = link.xpath('.//img/@alt')[0] - - # parse json-data (it is required to add a space, to make it parsable) - json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('m'))) - - url = json_data.get('purl') - img_src = json_data.get('murl') - thumbnail = json_data.get('turl') - - # append result - 
results.append({'template': 'images.html', - 'url': url, - 'title': title, - 'content': '', - 'thumbnail_src': thumbnail, - 'img_src': img_src}) - - # TODO stop parsing if 10 images are found - # if len(results) >= 10: - # break + for result in dom.xpath('//div[@class="imgpt"]'): + + img_format = result.xpath('./div[contains(@class, "img_info")]/span/text()')[0] + # Microsoft seems to experiment with this code so don't make the path too specific, + # just catch the text section for the first anchor in img_info assuming this to be + # the originating site. + source = result.xpath('./div[contains(@class, "img_info")]//a/text()')[0] + + try: + m = loads(result.xpath('./a/@m')[0]) + + # strip 'Unicode private use area' highlighting, they render to Tux + # the Linux penguin and a standing diamond on my machine... + title = m.get('t', '').replace(u'\ue000', '').replace(u'\ue001', '') + results.append({'template': 'images.html', + 'url': m['purl'], + 'thumbnail_src': m['turl'], + 'img_src': m['murl'], + 'content': '', + 'title': title, + 'source': source, + 'img_format': img_format}) + except: + continue - # return results return results |