author    misnyo <misnyo@users.noreply.github.com>  2017-09-04 17:48:25 +0200
committer GitHub <noreply@github.com>               2017-09-04 17:48:25 +0200
commit    c3232b0e1a82315e5936fb3c0731548efd66a0b3 (patch)
tree      2ba9db54ae7b36ae138285fc2a185afaa7e570ac /searx
parent    01330f71cd60cab6c1ce9a19c28c2729f2d02344 (diff)
parent    688801076d173de67bf4543ca289a35f28b6c245 (diff)
Merge branch 'master' into nyaa
Diffstat (limited to 'searx')
-rw-r--r--  searx/engines/generalfile.py  62
-rw-r--r--  searx/engines/gigablast.py     8
-rw-r--r--  searx/engines/google_news.py   4
-rw-r--r--  searx/settings.yml            25
4 files changed, 21 insertions(+), 78 deletions(-)
diff --git a/searx/engines/generalfile.py b/searx/engines/generalfile.py
deleted file mode 100644
index 3bb27444f..000000000
--- a/searx/engines/generalfile.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
- General Files (Files)
-
- @website http://www.general-files.org
- @provide-api no (nothing found)
-
- @using-api no (because nothing found)
- @results HTML (using search portal)
- @stable no (HTML can change)
- @parse url, title, content
-
- @todo detect torrents?
-"""
-
-from lxml import html
-
-# engine dependent config
-categories = ['files']
-paging = True
-
-# search-url
-base_url = 'http://www.general-file.com'
-search_url = base_url + '/files-{letter}/{query}/{pageno}'
-
-# specific xpath variables
-result_xpath = '//table[@class="block-file"]'
-title_xpath = './/h2/a//text()'
-url_xpath = './/h2/a/@href'
-content_xpath = './/p//text()'
-
-
-# do search-request
-def request(query, params):
-
- params['url'] = search_url.format(query=query,
- letter=query[0],
- pageno=params['pageno'])
-
- return params
-
-
-# get response from search-request
-def response(resp):
- results = []
-
- dom = html.fromstring(resp.text)
-
- # parse results
- for result in dom.xpath(result_xpath):
- url = result.xpath(url_xpath)[0]
-
- # skip fast download links
- if not url.startswith('/'):
- continue
-
- # append result
- results.append({'url': base_url + url,
- 'title': ''.join(result.xpath(title_xpath)),
- 'content': ''.join(result.xpath(content_xpath))})
-
- # return results
- return results
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index 37933c69b..2bdc97fd1 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -10,6 +10,7 @@
@parse url, title, content
"""
+import random
from json import loads
from time import time
from lxml.html import fromstring
@@ -32,7 +33,8 @@ search_string = 'search?{query}'\
'&qh=0'\
'&qlang={lang}'\
'&ff={safesearch}'\
- '&rxikd={rxikd}' # random number - 9 digits
+ '&rxieu={rxieu}'\
+ '&rand={rxikd}' # current unix timestamp in milliseconds
# specific xpath variables
results_xpath = '//response//result'
@@ -59,10 +61,12 @@ def request(query, params):
else:
safesearch = 0
+ # rxieu appears to be a hash of the search query, but currently accepts a random value
search_path = search_string.format(query=urlencode({'q': query}),
offset=offset,
number_of_results=number_of_results,
- rxikd=str(time())[:9],
+ rxikd=int(time() * 1000),
+ rxieu=random.randint(1000000000, 9999999999),
lang=language,
safesearch=safesearch)
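Taken together, the gigablast change swaps the single 9-digit rxikd value for a millisecond timestamp plus a random rxieu. A minimal standalone sketch of the resulting query-string construction, abridged to the parameters visible in the hunk (build_search_path is a hypothetical wrapper; the real request() pulls language and safesearch from params):

    import random
    from time import time
    from urllib.parse import urlencode

    # Abridged to the parameters shown in the hunk above; the real
    # search_string also carries paging and result-count parameters.
    search_string = ('search?{query}'
                     '&qh=0'
                     '&qlang={lang}'
                     '&ff={safesearch}'
                     '&rxieu={rxieu}'
                     '&rand={rxikd}')

    def build_search_path(query, lang='en', safesearch=0):
        # rand: current unix timestamp in milliseconds.
        # rxieu: random 10-digit number standing in for whatever
        # query hash the site actually expects.
        return search_string.format(query=urlencode({'q': query}),
                                    lang=lang,
                                    safesearch=safesearch,
                                    rxieu=random.randint(1000000000, 9999999999),
                                    rxikd=int(time() * 1000))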
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 7344b5289..8881d0dad 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -67,8 +67,8 @@ def response(resp):
for result in dom.xpath('//div[@class="g"]|//div[@class="g _cy"]'):
try:
r = {
- 'url': result.xpath('.//div[@class="_cnc"]//a/@href')[0],
- 'title': ''.join(result.xpath('.//div[@class="_cnc"]//h3//text()')),
+ 'url': result.xpath('.//a[@class="l _PMs"]')[0].attrib.get("href"),
+ 'title': ''.join(result.xpath('.//a[@class="l _PMs"]//text()')),
'content': ''.join(result.xpath('.//div[@class="st"]//text()')),
}
except:
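The google_news fix tracks a markup change: the result link moved from div._cnc to a.l._PMs. A minimal sketch of the new extraction outside the engine (extract_results is a hypothetical helper, and the explicit guard stands in for the bare except in the surrounding code):

    from lxml import html

    def extract_results(page_text):
        # Apply the updated selectors to a fetched Google News results page.
        results = []
        dom = html.fromstring(page_text)
        for result in dom.xpath('//div[@class="g"]|//div[@class="g _cy"]'):
            links = result.xpath('.//a[@class="l _PMs"]')
            if not links:
                continue  # markup variant without a title link; skip it
            results.append({'url': links[0].attrib.get('href'),
                            'title': ''.join(result.xpath('.//a[@class="l _PMs"]//text()')),
                            'content': ''.join(result.xpath('.//div[@class="st"]//text()'))})
        return results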
diff --git a/searx/settings.yml b/searx/settings.yml
index 8f57e2e84..f262f4973 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -242,15 +242,16 @@ engines:
disabled: True
- name : gitlab
- engine : xpath
+ engine : json_engine
paging : True
- search_url : https://gitlab.com/search?page={pageno}&search={query}
- url_xpath : //li[@class="project-row"]//a[@class="project"]/@href
- title_xpath : //li[@class="project-row"]//span[contains(@class, "project-full-name")]
- content_xpath : //li[@class="project-row"]//div[@class="description"]/p
+ search_url : https://gitlab.com/api/v4/projects?search={query}&page={pageno}
+ url_query : web_url
+ title_query : name_with_namespace
+ content_query : description
+ page_size : 20
categories : it
shortcut : gl
- timeout : 5.0
+ timeout : 10.0
disabled : True
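The gitlab engine moves from scraping the HTML search page to the documented JSON API, hence the switch to json_engine and the longer timeout. Roughly what the new configuration asks the generic engine to do, as a sketch (gitlab_search is hypothetical; requests stands in for searx's own HTTP layer):

    import requests  # hypothetical: the real engine goes through searx's HTTP layer

    def gitlab_search(query, pageno=1):
        # Each JSON key named in the settings maps directly onto a result
        # field, which is all url_query / title_query / content_query express.
        resp = requests.get('https://gitlab.com/api/v4/projects',
                            params={'search': query, 'page': pageno, 'per_page': 20})
        return [{'url': project['web_url'],
                 'title': project['name_with_namespace'],
                 'content': project.get('description') or ''}
                for project in resp.json()]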
- name : github
@@ -321,9 +322,9 @@ engines:
engine : xpath
paging : True
search_url : https://geektimes.ru/search/page{pageno}/?q={query}
- url_xpath : //div[@class="search_results"]//a[@class="post__title_link"]/@href
- title_xpath : //div[@class="search_results"]//a[@class="post__title_link"]
- content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
+ url_xpath : //article[contains(@class, "post")]//a[@class="post__title_link"]/@href
+ title_xpath : //article[contains(@class, "post")]//a[@class="post__title_link"]
+ content_xpath : //article[contains(@class, "post")]//div[contains(@class, "post__text")]
categories : it
timeout : 4.0
disabled : True
@@ -333,9 +334,9 @@ engines:
engine : xpath
paging : True
search_url : https://habrahabr.ru/search/page{pageno}/?q={query}
- url_xpath : //div[@class="search_results"]//a[contains(@class, "post__title_link")]/@href
- title_xpath : //div[@class="search_results"]//a[contains(@class, "post__title_link")]
- content_xpath : //div[@class="search_results"]//div[contains(@class, "content")]
+ url_xpath : //article[contains(@class, "post")]//a[@class="post__title_link"]/@href
+ title_xpath : //article[contains(@class, "post")]//a[@class="post__title_link"]
+ content_xpath : //article[contains(@class, "post")]//div[contains(@class, "post__text")]
categories : it
timeout : 4.0
disabled : True
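Both geektimes and habrahabr keep the generic xpath engine and only retarget the selectors, from the old div.search_results wrapper to the article elements the sites now emit. A minimal sketch of how those three expressions are consumed (a distillation under stated assumptions, not the real engine, which also resolves relative URLs and handles pagination):

    from lxml import html

    URL_XPATH = '//article[contains(@class, "post")]//a[@class="post__title_link"]/@href'
    TITLE_XPATH = '//article[contains(@class, "post")]//a[@class="post__title_link"]'
    CONTENT_XPATH = '//article[contains(@class, "post")]//div[contains(@class, "post__text")]'

    def parse_xpath_results(page_text):
        # Evaluate each expression over the page and zip the lists together.
        dom = html.fromstring(page_text)
        urls = dom.xpath(URL_XPATH)          # list of href strings
        titles = dom.xpath(TITLE_XPATH)      # list of <a> elements
        contents = dom.xpath(CONTENT_XPATH)  # list of <div> elements
        return [{'url': u,
                 'title': t.text_content().strip(),
                 'content': c.text_content().strip()}
                for u, t, c in zip(urls, titles, contents)]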