Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/bing_images.py   |  9
-rw-r--r--  searx/engines/blekko_images.py | 68
-rw-r--r--  searx/engines/gigablast.py     | 63
-rw-r--r--  searx/engines/google_images.py | 11
-rw-r--r--  searx/engines/piratebay.py     |  2
5 files changed, 149 insertions(+), 4 deletions(-)
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py
index 9d1c22f5a..b8c61c151 100644
--- a/searx/engines/bing_images.py
+++ b/searx/engines/bing_images.py
@@ -21,12 +21,18 @@ import re
# engine dependent config
categories = ['images']
paging = True
+safesearch = True
# search-url
base_url = 'https://www.bing.com/'
search_string = 'images/search?{query}&count=10&first={offset}'
thumb_url = "http://ts1.mm.bing.net/th?id={ihk}"
+# safesearch definitions
+safesearch_types = {2: 'STRICT',
+ 1: 'DEMOTE',
+ 0: 'OFF'}
+
# do search-request
def request(query, params):
@@ -43,7 +49,8 @@ def request(query, params):
offset=offset)
params['cookies']['SRCHHPGUSR'] = \
- 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
+ 'NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0] +\
+ '&ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
params['url'] = base_url + search_path
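
For reference, a minimal sketch of the cookie value the patched expression yields per safesearch level. The helper name srchhpgusr_cookie is hypothetical; it just inlines the concatenation added above:

    safesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'}

    def srchhpgusr_cookie(language, safesearch):
        # same concatenation as in request(), defaulting to DEMOTE
        return ('NEWWND=0&NRSLT=-1&SRCHLANG=' + language.split('-')[0]
                + '&ADLT=' + safesearch_types.get(safesearch, 'DEMOTE'))

    print(srchhpgusr_cookie('en-US', 2))  # NEWWND=0&NRSLT=-1&SRCHLANG=en&ADLT=STRICT
    print(srchhpgusr_cookie('de-DE', 0))  # NEWWND=0&NRSLT=-1&SRCHLANG=de&ADLT=OFF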
diff --git a/searx/engines/blekko_images.py b/searx/engines/blekko_images.py
new file mode 100644
index 000000000..2e7ec904f
--- /dev/null
+++ b/searx/engines/blekko_images.py
@@ -0,0 +1,68 @@
+## Blekko (Images)
+#
+# @website https://blekko.com
+# @provide-api yes (unofficial)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, img_src
+
+from json import loads
+from urllib import urlencode
+
+# engine dependent config
+categories = ['images']
+paging = True
+safesearch = True
+
+# search-url
+base_url = 'https://blekko.com'
+search_url = '/api/images?{query}&c={c}'
+
+# safesearch definitions
+safesearch_types = {2: '1',
+ 1: '',
+ 0: '0'}
+
+
+# do search-request
+def request(query, params):
+ c = (params['pageno'] - 1) * 48
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'q': query}),
+ c=c)
+
+ if params['pageno'] != 1:
+ params['url'] += '&page={pageno}'.format(pageno=(params['pageno']-1))
+
+    # let Blekko know we don't want profiling
+ params['cookies']['tag_lesslogging'] = '1'
+
+ # parse safesearch argument
+ params['cookies']['safesearch'] = safesearch_types.get(params['safesearch'], '')
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # return empty array if there are no results
+ if not search_results:
+ return []
+
+ for result in search_results:
+ # append result
+ results.append({'url': result['page_url'],
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['url'],
+ 'template': 'images.html'})
+
+ # return results
+ return results
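
A quick sanity-check sketch of what this request() builds for page 2 of a "cats" search, assuming the module above is importable; the params dict is pared down to the fields the function actually reads:

    params = {'pageno': 2, 'safesearch': 1, 'cookies': {}}
    params = request('cats', params)
    print(params['url'])
    # https://blekko.com/api/images?q=cats&c=48&page=1
    # cookies now carry tag_lesslogging='1' and safesearch='' (level 1 maps to the default)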
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
new file mode 100644
index 000000000..8749c3256
--- /dev/null
+++ b/searx/engines/gigablast.py
@@ -0,0 +1,63 @@
+## Gigablast (Web)
+#
+# @website http://gigablast.com
+# @provide-api yes (http://gigablast.com/api.html)
+#
+# @using-api yes
+# @results XML
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from cgi import escape
+from lxml import etree
+
+# engine dependent config
+categories = ['general']
+paging = True
+number_of_results = 5
+
+# search-url
+base_url = 'http://gigablast.com/'
+search_string = 'search?{query}&n={number_of_results}&s={offset}&xml=1&qh=0'
+
+# specific xpath variables
+results_xpath = '//response//result'
+url_xpath = './/url'
+title_xpath = './/title'
+content_xpath = './/sum'
+
+
+# do search-request
+def request(query, params):
+ offset = (params['pageno'] - 1) * number_of_results
+
+ search_path = search_string.format(
+ query=urlencode({'q': query}),
+ offset=offset,
+ number_of_results=number_of_results)
+
+ params['url'] = base_url + search_path
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = etree.fromstring(resp.content)
+
+ # parse results
+ for result in dom.xpath(results_xpath):
+ url = result.xpath(url_xpath)[0].text
+ title = result.xpath(title_xpath)[0].text
+ content = escape(result.xpath(content_xpath)[0].text)
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results
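
To illustrate what the xpaths above pick out, a sketch feeding response() a hand-made XML body in the shape those xpaths expect (an assumption about the API's layout); FakeResponse is a stand-in for the real HTTP response object:

    sample = b"""<response>
      <result>
        <url>example.com/page</url>
        <title>Example page</title>
        <sum>A short summary.</sum>
      </result>
    </response>"""

    class FakeResponse(object):
        content = sample

    print(response(FakeResponse()))
    # [{'url': 'example.com/page', 'title': 'Example page', 'content': 'A short summary.'}]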
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 092ae6639..1c0e62f5c 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -15,18 +15,25 @@ from json import loads
# engine dependent config
categories = ['images']
paging = True
+safesearch = True
# search-url
url = 'https://ajax.googleapis.com/'
-search_url = url + 'ajax/services/search/images?v=1.0&start={offset}&rsz=large&safe=off&filter=off&{query}'
+search_url = url + 'ajax/services/search/images?v=1.0&start={offset}&rsz=large&safe={safesearch}&filter=off&{query}'
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 8
+ if params['safesearch'] == 0:
+ safesearch = 'off'
+ else:
+ safesearch = 'on'
+
params['url'] = search_url.format(query=urlencode({'q': query}),
- offset=offset)
+ offset=offset,
+ safesearch=safesearch)
return params
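
And the URLs the patched request() now produces for the two safesearch branches (page 1, so offset 0; the params dict is pared down to what the function reads):

    for level in (0, 1):
        p = request('cats', {'pageno': 1, 'safesearch': level})
        print(p['url'])
    # https://ajax.googleapis.com/ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&q=cats
    # https://ajax.googleapis.com/ajax/services/search/images?v=1.0&start=0&rsz=large&safe=on&filter=off&q=cats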
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
index 207df276c..fa5c61128 100644
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -20,7 +20,7 @@ categories = ['videos', 'music', 'files']
paging = True
# search-url
-url = 'https://thepiratebay.cr/'
+url = 'https://thepiratebay.se/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
# piratebay specific type-definitions