summaryrefslogtreecommitdiff
path: root/searx/engines/deviantart.py
diff options
context:
space:
mode:
author    Bnyro <bnyro@tutanota.com>  2023-09-08 12:08:14 +0200
committer Markus Heiser <markus.heiser@darmarIT.de>  2023-09-11 13:22:36 +0200
commite73a6f5d14f4f790cbbf318d271895017ee48b94 (patch)
tree20a6c949692a09e781baa1f6ea6306c32682a1d6 /searx/engines/deviantart.py
parent1f7366060eff50217486d562f371eeec04da5b82 (diff)
downloadsearxng-e73a6f5d14f4f790cbbf318d271895017ee48b94.tar.gz
searxng-e73a6f5d14f4f790cbbf318d271895017ee48b94.zip
[fix] engine deviantart: review of the result-scrapper
The deviantart site changed and hence deviantart is currently unusable.
Diffstat (limited to 'searx/engines/deviantart.py')
-rw-r--r--  searx/engines/deviantart.py | 93
1 file changed, 50 insertions(+), 43 deletions(-)
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index e44ac28e5..fa88d9389 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
-"""
- Deviantart (Images)
+"""Deviantart (Images)
+
"""
-from urllib.parse import urlencode
+import urllib.parse
from lxml import html
+from searx.utils import extract_text, eval_xpath, eval_xpath_list
+
# about
about = {
"website": 'https://www.deviantart.com/',
@@ -20,31 +22,30 @@ about = {
# engine dependent config
categories = ['images']
paging = True

# search-url
base_url = 'https://www.deviantart.com'

# XPath selectors for scraping the search result page.  The class names are
# generated by deviantart's frontend build and will break on redesigns.
results_xpath = '//div[@class="_2pZkk"]/div/div/a'  # one <a> per deviation
url_xpath = './@href'
thumbnail_src_xpath = './div/img/@src'
img_src_xpath = './div/img/@srcset'
title_xpath = './@aria-label'
# text node shown in place of the image for watch-gated (premium) deviations
premium_xpath = '../div/div/div/text()'
premium_keytext = 'Watch the artist to view this deviation'
# pagination cursor link; NOTE(review): assumes the last "_1OGeq" anchor is
# the *next*-page link (not "previous") -- confirm against a live page
cursor_xpath = '(//a[@class="_1OGeq"]/@href)[last()]'
def request(query, params):
    """Assemble the DeviantArt search request.

    DeviantArt paginates with an opaque cursor instead of page numbers, so
    for pages > 1 we follow the ``nextpage`` URL that the previous
    ``response()`` call stored in ``engine_data``.  Page 1 always issues a
    fresh search, e.g. https://www.deviantart.com/search?q=foo
    """
    # Defensive: fall back to a fresh search if no engine_data was passed
    # (original code raised KeyError on a missing 'engine_data' key).
    nextpage_url = (params.get('engine_data') or {}).get('nextpage')

    # don't use nextpage when the user selected to jump back to page 1
    if params['pageno'] > 1 and nextpage_url is not None:
        params['url'] = nextpage_url
    else:
        params['url'] = f"{base_url}/search?{urllib.parse.urlencode({'q': query})}"

    return params
@@ -52,30 +53,36 @@ def request(query, params):
def response(resp):
    """Parse the DeviantArt search page into image results.

    Skips watch-gated ("premium") deviations, derives the full-size image
    URL from the ``srcset`` attribute, and forwards the pagination cursor
    to the next ``request()`` call via an ``engine_data`` result.
    """
    dom = html.fromstring(resp.text)
    results = []

    for item in eval_xpath_list(dom, results_xpath):
        # premium deviations only render a blurred placeholder -> drop them
        premium_text = extract_text(eval_xpath(item, premium_xpath))
        if premium_text and premium_keytext in premium_text:
            continue

        img_src = extract_text(eval_xpath(item, img_src_xpath))
        if img_src:
            # srcset: "url1 w1, url2 w2, ..." -- keep the first candidate URL
            img_src = img_src.split(' ')[0]
            # strip the '/v1...' transform suffix from the path to get the
            # untransformed image
            parsed = urllib.parse.urlparse(img_src)
            trimmed_path = parsed.path.split('/v1')[0]
            img_src = parsed._replace(path=trimmed_path).geturl()

        results.append(
            {
                'template': 'images.html',
                'url': extract_text(eval_xpath(item, url_xpath)),
                'img_src': img_src,
                'thumbnail_src': extract_text(eval_xpath(item, thumbnail_src_xpath)),
                'title': extract_text(eval_xpath(item, title_xpath)),
            }
        )

    # hand the pagination cursor back to request() for the next page
    nextpage_url = extract_text(eval_xpath(dom, cursor_xpath))
    if nextpage_url:
        results.append(
            {
                'engine_data': nextpage_url.replace("http://", "https://"),
                'key': 'nextpage',
            }
        )

    return results