author    Nick Espig <nickespig@gmail.com>    2019-04-12 23:12:56 +0200
committer Nick Espig <nickespig@gmail.com>    2019-04-14 12:03:25 +0200
commit    1c6ab79b9fa79906909d068d51d33ffca1c6dc4f (patch)
tree      b5ff377e1572e2dfd863c88b22d6466429d63d41 /searx/engines
parent    34d85c8c3c2191818544ecd5e6c0508590657a2b (diff)
Fix google image search
- Because the full image URL is not present in the DOM, we use the same URL for "img_src" as for "url" (the URL of the source page). See example HTML: https://gist.github.com/Nachtalb/2dea8a4d2c723c49226ad9645838121f
- Remove unused import
- Fix google image search title
- Keep google image safe value up to date
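The core of the fix is that the result page only exposes Google's own /url?q=... redirect, so the source URL has to be recovered from the "q" query parameter and is then reused for both "url" and "img_src". A minimal sketch of that extraction, using Python's standard urllib.parse (the engine imports equivalent helpers from searx.url_utils); the href value below is invented for illustration and is not taken from the commit or the linked gist:

    # Sketch of the URL extraction described in the commit message.
    # The href is made up; real result pages link to Google's /url redirect.
    from urllib.parse import urlparse, parse_qs

    href = '/url?q=https://example.org/photos/cat.jpg&sa=U&ved=0ahUKEw'
    google_url = urlparse(href)
    query = parse_qs(google_url.query)
    source_url = next(iter(query.get('q', [])), None)

    print(source_url)  # -> https://example.org/photos/cat.jpg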
Diffstat (limited to 'searx/engines')
-rw-r--r--   searx/engines/google_images.py   38
1 file changed, 21 insertions, 17 deletions
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 504831a10..66a543e57 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -11,7 +11,6 @@
 """
 
 from datetime import date, timedelta
-from json import loads
 from lxml import html
 from searx.url_utils import urlencode, urlparse, parse_qs
 
@@ -39,7 +38,6 @@ time_range_dict = {'day': 'd',
 # do search-request
 def request(query, params):
     search_options = {
-        'ijn': params['pageno'] - 1,
         'start': (params['pageno'] - 1) * number_of_results
     }
 
@@ -53,7 +51,7 @@
         search_options['tbs'] = time_range_custom_attr.format(start=start, end=end)
 
     if safesearch and params['safesearch']:
-        search_options['safe'] = 'on'
+        search_options['safe'] = 'active'
 
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       search_options=urlencode(search_options))
@@ -63,24 +61,30 @@
 
 # get response from search-request
 def response(resp):
+    dom = html.fromstring(resp.text)
+
     results = []
+    for element in dom.xpath('//div[@id="search"] //td'):
+        link = element.xpath('./a')[0]
 
-    dom = html.fromstring(resp.text)
+        google_url = urlparse(link.xpath('.//@href')[0])
+        query = parse_qs(google_url.query)
+        source_url = next(iter(query.get('q', [])), None)
 
-    # parse results
-    for img in dom.xpath('//a'):
-        r = {
-            'title': u' '.join(img.xpath('.//div[class="rg_ilmbg"]//text()')),
+        title_parts = element.xpath('./cite//following-sibling::*/text()')
+        title_parts.extend(element.xpath('./cite//following-sibling::text()')[:-1])
+
+        result = {
+            'title': ''.join(title_parts),
             'content': '',
             'template': 'images.html',
+            'url': source_url,
+            'img_src': source_url,
+            'thumbnail_src': next(iter(link.xpath('.//img //@src')), None)
         }
-        url = urlparse(img.xpath('.//@href')[0])
-        query = parse_qs(url.query)
-        r['url'] = query['imgrefurl'][0]
-        r['img_src'] = query['imgurl'][0]
-        r['thumbnail_src'] = r['img_src']
-        # append result
-        results.append(r)
-
-    # return results
+
+        if not source_url or not result['thumbnail_src']:
+            continue
+
+        results.append(result)
     return results
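
For reference, a self-contained sketch of how the rewritten response() walks the result markup. The HTML fragment below is invented so that it matches the XPaths used in the new code (a div#search containing a table of <td> cells, each wrapping an /url?q=... link with a thumbnail <img>); it is not the actual Google markup, and the title assembly from the <cite> siblings is left out for brevity:

    # Invented markup matching the XPaths in the new response() above;
    # the real Google Images page differs, this only illustrates the parsing.
    from lxml import html
    from urllib.parse import urlparse, parse_qs

    page = """
    <html><body><div id="search"><table><tr><td>
      <a href="/url?q=https://example.org/cat.jpg&amp;sa=U">
        <img src="https://thumbs.example/cat-thumb.jpg">
      </a>
      <cite>example.org</cite>
    </td></tr></table></div></body></html>
    """

    dom = html.fromstring(page)
    for element in dom.xpath('//div[@id="search"] //td'):
        link = element.xpath('./a')[0]
        query = parse_qs(urlparse(link.xpath('.//@href')[0]).query)
        source_url = next(iter(query.get('q', [])), None)
        thumbnail_src = next(iter(link.xpath('.//img //@src')), None)
        # skip entries without a source URL or thumbnail, like the engine does
        if not source_url or not thumbnail_src:
            continue
        print(source_url, thumbnail_src)
    # -> https://example.org/cat.jpg https://thumbs.example/cat-thumb.jpg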