author    Adam Tauber <asciimoo@gmail.com>    2019-12-02 13:39:58 +0000
committer GitHub <noreply@github.com>         2019-12-02 13:39:58 +0000
commit    731e34299d128f9352fd76e603c960c1f0628ed9 (patch)
tree      f591dc036131f2c3a045fbc557f66afc905646cb /searx/engines
parent    574cb25a16c3011f1797115cb6c90117e9bd1e8e (diff)
parent    85b37233458c21b775bf98568c0a5c9260aa14fe (diff)
Merge pull request #1744 from dalf/optimizations
[mod] speed optimization
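
This merge replaces direct element.xpath(...) calls across the engines with a shared
eval_xpath helper imported from searx.utils, which caches compiled etree.XPath objects
so each expression string is compiled only once per process. The helper matches the
code removed from searx/engines/wikidata.py further down; a minimal sketch, assuming
it was relocated to searx/utils essentially unchanged:

    # Sketch of the cached-XPath helper, mirroring the code removed from
    # searx/engines/wikidata.py in this diff (assumed moved to searx/utils).
    from lxml import etree

    xpath_cache = {}

    def get_xpath(xpath_str):
        # compile the expression once and keep the compiled object for reuse
        result = xpath_cache.get(xpath_str, None)
        if not result:
            result = etree.XPath(xpath_str)
            xpath_cache[xpath_str] = result
        return result

    def eval_xpath(element, xpath_str):
        # evaluate the cached compiled expression against an element
        xpath = get_xpath(xpath_str)
        return xpath(element)
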
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/bing.py        | 20
-rw-r--r--  searx/engines/dictzone.py    |  8
-rw-r--r--  searx/engines/doku.py        | 15
-rw-r--r--  searx/engines/duckduckgo.py  | 10
-rw-r--r--  searx/engines/duden.py       | 15
-rw-r--r--  searx/engines/gigablast.py   |  5
-rw-r--r--  searx/engines/google.py      | 32
-rw-r--r--  searx/engines/startpage.py   |  9
-rw-r--r--  searx/engines/wikidata.py    | 18
-rw-r--r--  searx/engines/xpath.py       | 20
-rw-r--r--  searx/engines/yahoo.py       | 18
11 files changed, 79 insertions(+), 91 deletions(-)
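
For context, element.xpath(expr) recompiles the expression string on every call,
whereas a precompiled etree.XPath object can be evaluated repeatedly; caching the
compiled object is where the speed-up comes from. An illustrative micro-benchmark
(not part of the commit; timings vary by machine):

    # Illustrative only: compare per-call recompilation against a cached
    # compiled XPath, using standard lxml APIs.
    import timeit
    from lxml import etree, html

    # build a small synthetic DOM with repeated result nodes
    dom = html.fromstring('<div>' + '<p class="r">hit</p>' * 200 + '</div>')
    expr = '//p[@class="r"]'
    compiled = etree.XPath(expr)

    print(timeit.timeit(lambda: dom.xpath(expr), number=1000))  # recompiles each call
    print(timeit.timeit(lambda: compiled(dom), number=1000))    # compiled once, reused
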
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 1e614867b..ed0b87dbd 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -18,7 +18,7 @@ from lxml import html
from searx import logger, utils
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
-from searx.utils import match_language, gen_useragent
+from searx.utils import match_language, gen_useragent, eval_xpath
logger = logger.getChild('bing engine')
@@ -65,11 +65,11 @@ def response(resp):
dom = html.fromstring(resp.text)
# parse results
- for result in dom.xpath('//div[@class="sa_cc"]'):
- link = result.xpath('.//h3/a')[0]
+ for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
+ link = eval_xpath(result, './/h3/a')[0]
url = link.attrib.get('href')
title = extract_text(link)
- content = extract_text(result.xpath('.//p'))
+ content = extract_text(eval_xpath(result, './/p'))
# append result
results.append({'url': url,
@@ -77,11 +77,11 @@ def response(resp):
'content': content})
# parse results again if nothing is found yet
- for result in dom.xpath('//li[@class="b_algo"]'):
- link = result.xpath('.//h2/a')[0]
+ for result in eval_xpath(dom, '//li[@class="b_algo"]'):
+ link = eval_xpath(result, './/h2/a')[0]
url = link.attrib.get('href')
title = extract_text(link)
- content = extract_text(result.xpath('.//p'))
+ content = extract_text(eval_xpath(result, './/p'))
# append result
results.append({'url': url,
@@ -89,7 +89,7 @@ def response(resp):
'content': content})
try:
- result_len_container = "".join(dom.xpath('//span[@class="sb_count"]/text()'))
+ result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
result_len_container = utils.to_string(result_len_container)
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
@@ -113,9 +113,9 @@ def response(resp):
def _fetch_supported_languages(resp):
supported_languages = []
dom = html.fromstring(resp.text)
- options = dom.xpath('//div[@id="limit-languages"]//input')
+ options = eval_xpath(dom, '//div[@id="limit-languages"]//input')
for option in options:
- code = option.xpath('./@id')[0].replace('_', '-')
+ code = eval_xpath(option, './@id')[0].replace('_', '-')
if code == 'nb':
code = 'no'
supported_languages.append(code)
diff --git a/searx/engines/dictzone.py b/searx/engines/dictzone.py
index 09db048cc..423af0971 100644
--- a/searx/engines/dictzone.py
+++ b/searx/engines/dictzone.py
@@ -11,7 +11,7 @@
import re
from lxml import html
-from searx.utils import is_valid_lang
+from searx.utils import is_valid_lang, eval_xpath
from searx.url_utils import urljoin
categories = ['general']
@@ -47,14 +47,14 @@ def response(resp):
dom = html.fromstring(resp.text)
- for k, result in enumerate(dom.xpath(results_xpath)[1:]):
+ for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
try:
- from_result, to_results_raw = result.xpath('./td')
+ from_result, to_results_raw = eval_xpath(result, './td')
except:
continue
to_results = []
- for to_result in to_results_raw.xpath('./p/a'):
+ for to_result in eval_xpath(to_results_raw, './p/a'):
t = to_result.text_content()
if t.strip():
to_results.append(to_result.text_content())
diff --git a/searx/engines/doku.py b/searx/engines/doku.py
index a391be444..d20e66026 100644
--- a/searx/engines/doku.py
+++ b/searx/engines/doku.py
@@ -11,6 +11,7 @@
from lxml.html import fromstring
from searx.engines.xpath import extract_text
+from searx.utils import eval_xpath
from searx.url_utils import urlencode
# engine dependent config
@@ -45,16 +46,16 @@ def response(resp):
# parse results
# Quickhits
- for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+ for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
try:
- res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
except:
continue
if not res_url:
continue
- title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+ title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
# append result
results.append({'title': title,
@@ -62,13 +63,13 @@ def response(resp):
'url': base_url + res_url})
# Search results
- for r in doc.xpath('//dl[@class="search_results"]/*'):
+ for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
try:
if r.tag == "dt":
- res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
- title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+ res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
+ title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
elif r.tag == "dd":
- content = extract_text(r.xpath('.'))
+ content = extract_text(eval_xpath(r, '.'))
# append result
results.append({'title': title,
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index e77ef0126..0d2c0af2d 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -18,7 +18,7 @@ from json import loads
from searx.engines.xpath import extract_text
from searx.poolrequests import get
from searx.url_utils import urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
# engine dependent config
categories = ['general']
@@ -106,19 +106,19 @@ def response(resp):
doc = fromstring(resp.text)
# parse results
- for i, r in enumerate(doc.xpath(result_xpath)):
+ for i, r in enumerate(eval_xpath(doc, result_xpath)):
if i >= 30:
break
try:
- res_url = r.xpath(url_xpath)[-1]
+ res_url = eval_xpath(r, url_xpath)[-1]
except:
continue
if not res_url:
continue
- title = extract_text(r.xpath(title_xpath))
- content = extract_text(r.xpath(content_xpath))
+ title = extract_text(eval_xpath(r, title_xpath))
+ content = extract_text(eval_xpath(r, content_xpath))
# append result
results.append({'title': title,
diff --git a/searx/engines/duden.py b/searx/engines/duden.py
index 444f18c1f..cf2f1a278 100644
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -11,6 +11,7 @@
from lxml import html, etree
import re
from searx.engines.xpath import extract_text
+from searx.utils import eval_xpath
from searx.url_utils import quote, urljoin
from searx import logger
@@ -52,9 +53,9 @@ def response(resp):
dom = html.fromstring(resp.text)
try:
- number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
- '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
- )
+ number_of_results_string =\
+ re.sub('[^0-9]', '',
+ eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])
results.append({'number_of_results': int(number_of_results_string)})
@@ -62,12 +63,12 @@ def response(resp):
logger.debug("Couldn't read number of results.")
pass
- for result in dom.xpath('//section[not(contains(@class, "essay"))]'):
+ for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
try:
- url = result.xpath('.//h2/a')[0].get('href')
+ url = eval_xpath(result, './/h2/a')[0].get('href')
url = urljoin(base_url, url)
- title = result.xpath('string(.//h2/a)').strip()
- content = extract_text(result.xpath('.//p'))
+ title = eval_xpath(result, 'string(.//h2/a)').strip()
+ content = extract_text(eval_xpath(result, './/p'))
# append result
results.append({'url': url,
'title': title,
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index 6b0402233..a84f3f69d 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -15,6 +15,7 @@ from json import loads
from time import time
from lxml.html import fromstring
from searx.url_utils import urlencode
+from searx.utils import eval_xpath
# engine dependent config
categories = ['general']
@@ -99,9 +100,9 @@ def response(resp):
def _fetch_supported_languages(resp):
supported_languages = []
dom = fromstring(resp.text)
- links = dom.xpath('//span[@id="menu2"]/a')
+ links = eval_xpath(dom, '//span[@id="menu2"]/a')
for link in links:
- href = link.xpath('./@href')[0].split('lang%3A')
+ href = eval_xpath(link, './@href')[0].split('lang%3A')
if len(href) == 2:
code = href[1].split('_')
if len(code) == 2:
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 0a0d6ccb1..19bde710d 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -14,7 +14,7 @@ from lxml import html, etree
from searx.engines.xpath import extract_text, extract_url
from searx import logger
from searx.url_utils import urlencode, urlparse, parse_qsl
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
logger = logger.getChild('google engine')
@@ -156,7 +156,7 @@ def parse_url(url_string, google_hostname):
# returns extract_text on the first result selected by the xpath or None
def extract_text_from_dom(result, xpath):
- r = result.xpath(xpath)
+ r = eval_xpath(result, xpath)
if len(r) > 0:
return extract_text(r[0])
return None
@@ -227,21 +227,21 @@ def response(resp):
# convert the text to dom
dom = html.fromstring(resp.text)
- instant_answer = dom.xpath('//div[@id="_vBb"]//text()')
+ instant_answer = eval_xpath(dom, '//div[@id="_vBb"]//text()')
if instant_answer:
results.append({'answer': u' '.join(instant_answer)})
try:
- results_num = int(dom.xpath('//div[@id="resultStats"]//text()')[0]
+ results_num = int(eval_xpath(dom, '//div[@id="resultStats"]//text()')[0]
.split()[1].replace(',', ''))
results.append({'number_of_results': results_num})
except:
pass
# parse results
- for result in dom.xpath(results_xpath):
+ for result in eval_xpath(dom, results_xpath):
try:
- title = extract_text(result.xpath(title_xpath)[0])
- url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
+ title = extract_text(eval_xpath(result, title_xpath)[0])
+ url = parse_url(extract_url(eval_xpath(result, url_xpath), google_url), google_hostname)
parsed_url = urlparse(url, google_hostname)
# map result
@@ -250,7 +250,7 @@ def response(resp):
continue
# if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start):
# print "yooooo"*30
- # x = result.xpath(map_near)
+ # x = eval_xpath(result, map_near)
# if len(x) > 0:
# # map : near the location
# results = results + parse_map_near(parsed_url, x, google_hostname)
@@ -287,11 +287,11 @@ def response(resp):
continue
# parse suggestion
- for suggestion in dom.xpath(suggestion_xpath):
+ for suggestion in eval_xpath(dom, suggestion_xpath):
# append suggestion
results.append({'suggestion': extract_text(suggestion)})
- for correction in dom.xpath(spelling_suggestion_xpath):
+ for correction in eval_xpath(dom, spelling_suggestion_xpath):
results.append({'correction': extract_text(correction)})
# return results
@@ -300,9 +300,9 @@ def response(resp):
def parse_images(result, google_hostname):
results = []
- for image in result.xpath(images_xpath):
- url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname)
- img_src = extract_text(image.xpath(image_img_src_xpath)[0])
+ for image in eval_xpath(result, images_xpath):
+ url = parse_url(extract_text(eval_xpath(image, image_url_xpath)[0]), google_hostname)
+ img_src = extract_text(eval_xpath(image, image_img_src_xpath)[0])
# append result
results.append({'url': url,
@@ -389,10 +389,10 @@ def attributes_to_html(attributes):
def _fetch_supported_languages(resp):
supported_languages = {}
dom = html.fromstring(resp.text)
- options = dom.xpath('//*[@id="langSec"]//input[@name="lr"]')
+ options = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
for option in options:
- code = option.xpath('./@value')[0].split('_')[-1]
- name = option.xpath('./@data-name')[0].title()
+ code = eval_xpath(option, './@value')[0].split('_')[-1]
+ name = eval_xpath(option, './@data-name')[0].title()
supported_languages[code] = {"name": name}
return supported_languages
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 0f0ec6e18..76567396f 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -16,6 +16,7 @@ from datetime import datetime, timedelta
import re
from searx.engines.xpath import extract_text
from searx.languages import language_codes
+from searx.utils import eval_xpath
# engine dependent config
categories = ['general']
@@ -70,8 +71,8 @@ def response(resp):
dom = html.fromstring(resp.text)
# parse results
- for result in dom.xpath(results_xpath):
- links = result.xpath(link_xpath)
+ for result in eval_xpath(dom, results_xpath):
+ links = eval_xpath(result, link_xpath)
if not links:
continue
link = links[0]
@@ -87,8 +88,8 @@ def response(resp):
title = extract_text(link)
- if result.xpath(content_xpath):
- content = extract_text(result.xpath(content_xpath))
+ if eval_xpath(result, content_xpath):
+ content = extract_text(eval_xpath(result, content_xpath))
else:
content = ''
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 5ea2b9958..e913b3915 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -16,7 +16,7 @@ from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
from json import loads
from lxml.html import fromstring
@@ -57,22 +57,6 @@ language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a'
-# xpath_cache
-xpath_cache = {}
-
-
-def get_xpath(xpath_str):
- result = xpath_cache.get(xpath_str, None)
- if not result:
- result = etree.XPath(xpath_str)
- xpath_cache[xpath_str] = result
- return result
-
-
-def eval_xpath(element, xpath_str):
- xpath = get_xpath(xpath_str)
- return xpath(element)
-
def get_id_cache(result):
id_cache = {}
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 61494ce4e..b75896cc7 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -1,6 +1,6 @@
from lxml import html
from lxml.etree import _ElementStringResult, _ElementUnicodeResult
-from searx.utils import html_to_text
+from searx.utils import html_to_text, eval_xpath
from searx.url_utils import unquote, urlencode, urljoin, urlparse
search_url = None
@@ -104,15 +104,15 @@ def response(resp):
results = []
dom = html.fromstring(resp.text)
if results_xpath:
- for result in dom.xpath(results_xpath):
- url = extract_url(result.xpath(url_xpath), search_url)
- title = extract_text(result.xpath(title_xpath))
- content = extract_text(result.xpath(content_xpath))
+ for result in eval_xpath(dom, results_xpath):
+ url = extract_url(eval_xpath(result, url_xpath), search_url)
+ title = extract_text(eval_xpath(result, title_xpath))
+ content = extract_text(eval_xpath(result, content_xpath))
tmp_result = {'url': url, 'title': title, 'content': content}
# add thumbnail if available
if thumbnail_xpath:
- thumbnail_xpath_result = result.xpath(thumbnail_xpath)
+ thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
if len(thumbnail_xpath_result) > 0:
tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
@@ -120,14 +120,14 @@ def response(resp):
else:
for url, title, content in zip(
(extract_url(x, search_url) for
- x in dom.xpath(url_xpath)),
- map(extract_text, dom.xpath(title_xpath)),
- map(extract_text, dom.xpath(content_xpath))
+ x in eval_xpath(dom, url_xpath)),
+ map(extract_text, eval_xpath(dom, title_xpath)),
+ map(extract_text, eval_xpath(dom, content_xpath))
):
results.append({'url': url, 'title': title, 'content': content})
if not suggestion_xpath:
return results
- for suggestion in dom.xpath(suggestion_xpath):
+ for suggestion in eval_xpath(dom, suggestion_xpath):
results.append({'suggestion': extract_text(suggestion)})
return results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 73b78bcf7..36c1a11f8 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -14,7 +14,7 @@
from lxml import html
from searx.engines.xpath import extract_text, extract_url
from searx.url_utils import unquote, urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
# engine dependent config
categories = ['general']
@@ -109,21 +109,21 @@ def response(resp):
dom = html.fromstring(resp.text)
try:
- results_num = int(dom.xpath('//div[@class="compPagination"]/span[last()]/text()')[0]
+ results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
.split()[0].replace(',', ''))
results.append({'number_of_results': results_num})
except:
pass
# parse results
- for result in dom.xpath(results_xpath):
+ for result in eval_xpath(dom, results_xpath):
try:
- url = parse_url(extract_url(result.xpath(url_xpath), search_url))
- title = extract_text(result.xpath(title_xpath)[0])
+ url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
+ title = extract_text(eval_xpath(result, title_xpath)[0])
except:
continue
- content = extract_text(result.xpath(content_xpath)[0])
+ content = extract_text(eval_xpath(result, content_xpath)[0])
# append result
results.append({'url': url,
@@ -131,7 +131,7 @@ def response(resp):
'content': content})
# if no suggestion found, return results
- suggestions = dom.xpath(suggestion_xpath)
+ suggestions = eval_xpath(dom, suggestion_xpath)
if not suggestions:
return results
@@ -148,9 +148,9 @@ def response(resp):
def _fetch_supported_languages(resp):
supported_languages = []
dom = html.fromstring(resp.text)
- options = dom.xpath('//div[@id="yschlang"]/span/label/input')
+ options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
for option in options:
- code_parts = option.xpath('./@value')[0][5:].split('_')
+ code_parts = eval_xpath(option, './@value')[0][5:].split('_')
if len(code_parts) == 2:
code = code_parts[0] + '-' + code_parts[1].upper()
else: