Diffstat (limited to 'searx/engines/bing.py')
-rw-r--r-- | searx/engines/bing.py | 57 |
1 file changed, 37 insertions, 20 deletions
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 742379c1a..ed0b87dbd 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -13,10 +13,14 @@
  @todo        publishedDate
 """

+import re
 from lxml import html
+from searx import logger, utils
 from searx.engines.xpath import extract_text
 from searx.url_utils import urlencode
-from searx.utils import match_language, gen_useragent
+from searx.utils import match_language, gen_useragent, eval_xpath
+
+logger = logger.getChild('bing engine')

 # engine dependent config
 categories = ['general']
@@ -30,9 +34,13 @@ base_url = 'https://www.bing.com/'
 search_string = 'search?{query}&first={offset}'


+def _get_offset_from_pageno(pageno):
+    return (pageno - 1) * 10 + 1
+
+
 # do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 10 + 1
+    offset = _get_offset_from_pageno(params.get('pageno', 0))

     if params['language'] == 'all':
         lang = 'EN'
@@ -47,29 +55,21 @@ def request(query, params):

     params['url'] = base_url + search_path

-    params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64')
-
     return params


 # get response from search-request
 def response(resp):
     results = []
+    result_len = 0

     dom = html.fromstring(resp.text)
-
-    try:
-        results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
-                                                 .split()[0].replace(',', ''))})
-    except:
-        pass
-
     # parse results
-    for result in dom.xpath('//div[@class="sa_cc"]'):
-        link = result.xpath('.//h3/a')[0]
+    for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
+        link = eval_xpath(result, './/h3/a')[0]
         url = link.attrib.get('href')
         title = extract_text(link)
-        content = extract_text(result.xpath('.//p'))
+        content = extract_text(eval_xpath(result, './/p'))

         # append result
         results.append({'url': url,
@@ -77,18 +77,35 @@ def response(resp):
                         'content': content})

     # parse results again if nothing is found yet
-    for result in dom.xpath('//li[@class="b_algo"]'):
-        link = result.xpath('.//h2/a')[0]
+    for result in eval_xpath(dom, '//li[@class="b_algo"]'):
+        link = eval_xpath(result, './/h2/a')[0]
         url = link.attrib.get('href')
         title = extract_text(link)
-        content = extract_text(result.xpath('.//p'))
+        content = extract_text(eval_xpath(result, './/p'))

         # append result
         results.append({'url': url,
                         'title': title,
                         'content': content})

-    # return results
+    try:
+        result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
+        result_len_container = utils.to_string(result_len_container)
+        if "-" in result_len_container:
+            # Remove the part "from-to" for paginated request ...
+            result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
+
+        result_len_container = re.sub('[^0-9]', '', result_len_container)
+        if len(result_len_container) > 0:
+            result_len = int(result_len_container)
+    except Exception as e:
+        logger.debug('result error :\n%s', e)
+        pass
+
+    if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+        return []
+
+    results.append({'number_of_results': result_len})
     return results


@@ -96,9 +113,9 @@ def response(resp):
 def _fetch_supported_languages(resp):
     supported_languages = []
     dom = html.fromstring(resp.text)
-    options = dom.xpath('//div[@id="limit-languages"]//input')
+    options = eval_xpath(dom, '//div[@id="limit-languages"]//input')
     for option in options:
-        code = option.xpath('./@id')[0].replace('_', '-')
+        code = eval_xpath(option, './@id')[0].replace('_', '-')
         if code == 'nb':
             code = 'no'
         supported_languages.append(code)
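For reference, a minimal standalone sketch of the pagination arithmetic and result-count parsing introduced by this patch; parse_result_count and the sample sb_count strings are illustrative stand-ins, not part of the searx code.

import re


def _get_offset_from_pageno(pageno):
    # Same arithmetic as the helper added in the patch: Bing's "first"
    # parameter is 1-based, with 10 results per page.
    return (pageno - 1) * 10 + 1


def parse_result_count(sb_count_text):
    # Mirrors the try-block added to response(): drop the "from-to" prefix
    # that Bing shows on paginated pages, then keep only the digits.
    if "-" in sb_count_text:
        sb_count_text = sb_count_text[sb_count_text.find("-") * 2 + 2:]
    digits = re.sub('[^0-9]', '', sb_count_text)
    return int(digits) if digits else 0


# Sample strings are assumptions; real Bing markup may differ.
print(parse_result_count("23,400,000 results"))           # 23400000
print(parse_result_count("11-20 of 23,400,000 results"))  # 23400000
print(_get_offset_from_pageno(3))                         # 21 (offset requested for page 3)

In the patched response(), a requested offset greater than the parsed count short-circuits to an empty result list, which avoids reporting results for pages Bing does not actually have.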