-rw-r--r--  searx/engines/1337x.py      14
-rw-r--r--  searx/engines/acgsou.py     27
-rw-r--r--  searx/engines/apkmirror.py   9
-rw-r--r--  searx/engines/archlinux.py   6
-rw-r--r--  searx/engines/arxiv.py      20
-rw-r--r--  searx/engines/xpath.py      29
6 files changed, 51 insertions(+), 54 deletions(-)
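
Every hunk below follows the same pattern: raw lxml .xpath() calls (and the
bare except: blocks guarding their [0] indexing) are replaced by the
eval_xpath_* helpers from searx.utils, which turn a missing node into either a
caller-supplied default or a descriptive, engine-attributable error. A minimal
sketch of the helper semantics the diff relies on; the real implementations
live in searx.utils and raise searx-specific exceptions, so the ValueError
used here is only a stand-in:

    from lxml import etree

    def eval_xpath(element, xpath_spec):
        # Evaluate an XPath expression against an element. The real helper
        # also caches compiled expressions and wraps XPath syntax errors.
        return etree.XPath(xpath_spec)(element)

    def eval_xpath_list(element, xpath_spec, min_len=None):
        # Same evaluation, but guarantee a list result and optionally
        # enforce a minimum number of matches.
        result = eval_xpath(element, xpath_spec)
        if not isinstance(result, list):
            raise ValueError('XPath does not select a node set: %s' % xpath_spec)
        if min_len is not None and len(result) < min_len:
            raise ValueError('not enough results for XPath: %s' % xpath_spec)
        return result

    _NOTSET = object()

    def eval_xpath_getindex(element, xpath_spec, index, default=_NOTSET):
        # Return the index-th match; fall back to `default` when the match
        # is missing and a default was supplied, raise otherwise.
        result = eval_xpath_list(element, xpath_spec)
        if -len(result) <= index < len(result):
            return result[index]
        if default is _NOTSET:
            raise ValueError('XPath index %d out of range: %s' % (index, xpath_spec))
        return default
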
diff --git a/searx/engines/1337x.py b/searx/engines/1337x.py
index 9e045bc51..18478876a 100644
--- a/searx/engines/1337x.py
+++ b/searx/engines/1337x.py
@@ -1,6 +1,6 @@
from urllib.parse import quote, urljoin
from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_getindex
url = 'https://1337x.to/'
@@ -20,12 +20,12 @@ def response(resp):
dom = html.fromstring(resp.text)
- for result in dom.xpath('//table[contains(@class, "table-list")]/tbody//tr'):
- href = urljoin(url, result.xpath('./td[contains(@class, "name")]/a[2]/@href')[0])
- title = extract_text(result.xpath('./td[contains(@class, "name")]/a[2]'))
- seed = extract_text(result.xpath('.//td[contains(@class, "seeds")]'))
- leech = extract_text(result.xpath('.//td[contains(@class, "leeches")]'))
- filesize_info = extract_text(result.xpath('.//td[contains(@class, "size")]/text()'))
+ for result in eval_xpath_list(dom, '//table[contains(@class, "table-list")]/tbody//tr'):
+ href = urljoin(url, eval_xpath_getindex(result, './td[contains(@class, "name")]/a[2]/@href', 0))
+ title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))
+ seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
+ leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
+ filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
filesize, filesize_multiplier = filesize_info.split()
filesize = get_torrent_size(filesize, filesize_multiplier)
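
For 1337x the rewrite is mechanical: each result.xpath(...)[0] becomes
eval_xpath_getindex(result, ..., 0), and each iterated .xpath() becomes
eval_xpath_list(). A quick self-contained check against invented row markup,
using the sketch above:

    from lxml import html

    doc = html.fromstring(
        '<table class="table-list"><tbody><tr><td class="coll-1 name">'
        '<a href="/sub/10/0/">icon</a><a href="/torrent/1/example/">Example</a>'
        '</td></tr></tbody></table>')

    rows = eval_xpath_list(doc, '//table[contains(@class, "table-list")]/tbody//tr')
    # the second <a> of the name cell carries the torrent link, as in the engine
    href = eval_xpath_getindex(rows[0], './td[contains(@class, "name")]/a[2]/@href', 0)
    assert href == '/torrent/1/example/'
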
diff --git a/searx/engines/acgsou.py b/searx/engines/acgsou.py
index a436df283..b8b367c24 100644
--- a/searx/engines/acgsou.py
+++ b/searx/engines/acgsou.py
@@ -11,7 +11,7 @@
from urllib.parse import urlencode
from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath_list, eval_xpath_getindex
# engine dependent config
categories = ['files', 'images', 'videos', 'music']
@@ -37,29 +37,26 @@ def request(query, params):
def response(resp):
results = []
dom = html.fromstring(resp.text)
- for result in dom.xpath(xpath_results):
+ for result in eval_xpath_list(dom, xpath_results):
# defaults
filesize = 0
magnet_link = "magnet:?xt=urn:btih:{}&tr=http://tracker.acgsou.com:2710/announce"
- try:
- category = extract_text(result.xpath(xpath_category)[0])
- except:
- pass
-
- page_a = result.xpath(xpath_title)[0]
+ category = extract_text(eval_xpath_getindex(result, xpath_category, 0, default=[]))
+ page_a = eval_xpath_getindex(result, xpath_title, 0)
title = extract_text(page_a)
href = base_url + page_a.attrib.get('href')
magnet_link = magnet_link.format(page_a.attrib.get('href')[5:-5])
- try:
- filesize_info = result.xpath(xpath_filesize)[0]
- filesize = filesize_info[:-2]
- filesize_multiplier = filesize_info[-2:]
- filesize = get_torrent_size(filesize, filesize_multiplier)
- except:
- pass
+ filesize_info = eval_xpath_getindex(result, xpath_filesize, 0, default=None)
+ if filesize_info:
+ try:
+ filesize = filesize_info[:-2]
+ filesize_multiplier = filesize_info[-2:]
+ filesize = get_torrent_size(filesize, filesize_multiplier)
+ except ValueError:
+ pass
# download/seed/leech counts are not included: they appear to be generated randomly on each request
content = 'Category: "{category}".'
content = content.format(category=category)
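
In acgsou the two try/except: pass blocks around the [0] lookups are gone: a
missing category falls back to default=[] (which extract_text renders as an
empty string), and a missing filesize falls back to default=None so the
parsing branch can be skipped explicitly. The default=[] also fixes a latent
bug in the old code, where a failed lookup left `category` undefined on the
first iteration. A standalone illustration, with invented markup and XPaths
standing in for the engine's xpath_* variables:

    from lxml import html

    row = html.fromstring(
        '<table><tr><td class="title">'
        '<a href="show-12345.html">Example</a></td></tr></table>')

    # hypothetical stand-ins for xpath_category / xpath_filesize
    category_nodes = eval_xpath_getindex(row, './/td[@class="category"]', 0, default=[])
    assert category_nodes == []   # extract_text([]) would yield ''

    filesize_info = eval_xpath_getindex(row, './/td[@class="size"]/text()', 0, default=None)
    if filesize_info is None:     # explicit, instead of a swallowed IndexError
        filesize = 0
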
diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py
index a8ff499af..3a948dcb4 100644
--- a/searx/engines/apkmirror.py
+++ b/searx/engines/apkmirror.py
@@ -11,7 +11,7 @@
from urllib.parse import urlencode
from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
# engine dependent config
@@ -42,12 +42,13 @@ def response(resp):
dom = html.fromstring(resp.text)
# parse results
- for result in dom.xpath('.//div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
+ for result in eval_xpath_list(dom, './/div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
- link = result.xpath('.//h5/a')[0]
+ link = eval_xpath_getindex(result, './/h5/a', 0)
url = base_url + link.attrib.get('href') + '#downloads'
title = extract_text(link)
- thumbnail_src = base_url + result.xpath('.//img')[0].attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
+ thumbnail_src = base_url\
+ + eval_xpath_getindex(result, './/img', 0).attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
res = {
'url': url,
diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py
index 8f93f4f38..04117c07d 100644
--- a/searx/engines/archlinux.py
+++ b/searx/engines/archlinux.py
@@ -13,7 +13,7 @@
from urllib.parse import urlencode, urljoin
from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
# engine dependent config
categories = ['it']
@@ -131,8 +131,8 @@ def response(resp):
dom = html.fromstring(resp.text)
# parse results
- for result in dom.xpath(xpath_results):
- link = result.xpath(xpath_link)[0]
+ for result in eval_xpath_list(dom, xpath_results):
+ link = eval_xpath_getindex(result, xpath_link, 0)
href = urljoin(base_url, link.attrib.get('href'))
title = extract_text(link)
diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py
index 6e231c382..c702c5987 100644
--- a/searx/engines/arxiv.py
+++ b/searx/engines/arxiv.py
@@ -13,6 +13,7 @@
from lxml import html
from datetime import datetime
+from searx.utils import eval_xpath_list, eval_xpath_getindex
categories = ['science']
@@ -42,29 +43,26 @@ def response(resp):
results = []
dom = html.fromstring(resp.content)
- search_results = dom.xpath('//entry')
- for entry in search_results:
- title = entry.xpath('.//title')[0].text
+ for entry in eval_xpath_list(dom, '//entry'):
+ title = eval_xpath_getindex(entry, './/title', 0).text
- url = entry.xpath('.//id')[0].text
+ url = eval_xpath_getindex(entry, './/id', 0).text
content_string = '{doi_content}{abstract_content}'
- abstract = entry.xpath('.//summary')[0].text
+ abstract = eval_xpath_getindex(entry, './/summary', 0).text
# If a DOI is available, add it to the snippet
- try:
- doi_content = entry.xpath('.//link[@title="doi"]')[0].text
- content = content_string.format(doi_content=doi_content, abstract_content=abstract)
- except:
- content = content_string.format(doi_content="", abstract_content=abstract)
+ doi_element = eval_xpath_getindex(entry, './/link[@title="doi"]', 0, default=None)
+ doi_content = doi_element.text if doi_element is not None else ''
+ content = content_string.format(doi_content=doi_content, abstract_content=abstract)
if len(content) > 300:
content = content[0:300] + "..."
# TODO: center snippet on query term
- publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')
+ publishedDate = datetime.strptime(eval_xpath_getindex(entry, './/published', 0).text, '%Y-%m-%dT%H:%M:%SZ')
res_dict = {'url': url,
'title': title,
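
The arxiv hunk shows the pattern for an optional element: default=None plus an
explicit `is not None` test. The explicit test matters because a plain
truthiness check (if doi_element:) would also treat a present-but-childless
lxml element as missing, since lxml elements without child elements are falsy.
A compact illustration (the DOI value is made up):

    from lxml import etree

    entry = etree.fromstring(
        '<entry><link title="doi">10.0000/example</link></entry>')

    doi_element = eval_xpath_getindex(entry, './/link[@title="doi"]', 0, default=None)
    # bool(doi_element) is False here (no child elements), so `is not None`
    # is the only safe presence test
    doi_content = doi_element.text if doi_element is not None else ''
    assert doi_content == '10.0000/example'
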
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index a569d9160..d420e250a 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -1,6 +1,6 @@
from lxml import html
from urllib.parse import urlencode
-from searx.utils import extract_text, extract_url, eval_xpath
+from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
search_url = None
url_xpath = None
@@ -42,21 +42,22 @@ def response(resp):
is_onion = True if 'onions' in categories else False
if results_xpath:
- for result in eval_xpath(dom, results_xpath):
- url = extract_url(eval_xpath(result, url_xpath), search_url)
- title = extract_text(eval_xpath(result, title_xpath))
- content = extract_text(eval_xpath(result, content_xpath))
+ for result in eval_xpath_list(dom, results_xpath):
+ url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
+ title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
+ content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
tmp_result = {'url': url, 'title': title, 'content': content}
# add thumbnail if available
if thumbnail_xpath:
- thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
+ thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
if len(thumbnail_xpath_result) > 0:
tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
# add alternative cached url if available
if cached_xpath:
- tmp_result['cached_url'] = cached_url + extract_text(result.xpath(cached_xpath))
+ tmp_result['cached_url'] = cached_url\
+ + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
if is_onion:
tmp_result['is_onion'] = True
@@ -66,19 +67,19 @@ def response(resp):
if cached_xpath:
for url, title, content, cached in zip(
(extract_url(x, search_url) for
- x in dom.xpath(url_xpath)),
- map(extract_text, dom.xpath(title_xpath)),
- map(extract_text, dom.xpath(content_xpath)),
- map(extract_text, dom.xpath(cached_xpath))
+ x in eval_xpath_list(dom, url_xpath)),
+ map(extract_text, eval_xpath_list(dom, title_xpath)),
+ map(extract_text, eval_xpath_list(dom, content_xpath)),
+ map(extract_text, eval_xpath_list(dom, cached_xpath))
):
results.append({'url': url, 'title': title, 'content': content,
'cached_url': cached_url + cached, 'is_onion': is_onion})
else:
for url, title, content in zip(
(extract_url(x, search_url) for
- x in dom.xpath(url_xpath)),
- map(extract_text, dom.xpath(title_xpath)),
- map(extract_text, dom.xpath(content_xpath))
+ x in eval_xpath_list(dom, url_xpath)),
+ map(extract_text, eval_xpath_list(dom, title_xpath)),
+ map(extract_text, eval_xpath_list(dom, content_xpath))
):
results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
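
Finally, the generic xpath engine gains min_len=1 on the mandatory per-result
lookups: an engine whose url/title/content XPath matches nothing now fails at
evaluation time with a descriptive error instead of deep inside extract_url.
The thumbnail lookup deliberately stays without min_len, since thumbnails are
optional. Behaviour under the sketch above:

    from lxml import html

    dom = html.fromstring('<div class="result"><a href="https://example.com/">hit</a></div>')

    # at least one node required: the match is returned as a list ...
    assert eval_xpath_list(dom, './/a/@href', min_len=1) == ['https://example.com/']

    # ... and an empty match raises instead of silently returning []
    try:
        eval_xpath_list(dom, './/h3/a/@href', min_len=1)
    except ValueError as exc:
        print('engine misconfigured or page layout changed:', exc)
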