# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Digg (News, Social media)
"""
# pylint: disable=missing-function-docstring
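#
# A typical settings.yml entry enabling this engine might look like the
# following sketch (the shortcut value is illustrative, not taken from here):
#
#   - name: digg
#     engine: digg
#     shortcut: dg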

from json import loads
from urllib.parse import urlencode
from datetime import datetime

from lxml import html

# about
about = {
    "website": 'https://digg.com',
    "wikidata_id": 'Q270478',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['news', 'social media']
paging = True
base_url = 'https://digg.com'

# search-url
search_url = base_url + (
    '/api/search/'
    '?{query}'
    '&from={position}'
    '&size=20'
    '&format=html'
)
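# e.g. for the query "foo" on page 2 the formatted URL becomes:
#   https://digg.com/api/search/?q=foo&from=20&size=20&format=html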

def request(query, params):
    offset = (params['pageno'] - 1) * 20
    params['url'] = search_url.format(
        query=urlencode({'q': query}),
        position=offset,
    )
    return params
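
# Rough sketch of how the searx core is expected to drive request(); the call
# pattern is assumed from the engine interface and shown only for orientation:
#
#   params = {'pageno': 2}
#   request('foo', params)
#   # params['url'] -> 'https://digg.com/api/search/?q=foo&from=20&size=20&format=html'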

def response(resp):
    results = []
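
    # Expected shape of the JSON payload, inferred from the fields read below
    # (not an official schema):
    #
    #   {"mapped": [{"url": ..., "title": ..., "excerpt": "<p>...</p>",
    #                "created": {"ISO": "2020-10-16T14:09:55Z", ...},
    #                "images": {"thumbImage": ...}}, ...]}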

    # parse results
    for result in loads(resp.text)['mapped']:

        # strip html tags from the excerpt so only plain text remains
        content = html.document_fromstring(
            result['excerpt']
        ).text_content()

        # 'created': {'ISO': '2020-10-16T14:09:55Z', ...}
        published = datetime.strptime(
            result['created']['ISO'], '%Y-%m-%dT%H:%M:%SZ'
        )
        results.append({
            'url': result['url'],
            'title': result['title'],
            'content': content,
            'template': 'videos.html',
            'publishedDate': published,
            'thumbnail': result['images']['thumbImage'],
        })

    return results