# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""CORE (science)

"""

from json import loads
from datetime import datetime
from urllib.parse import urlencode

from searx.exceptions import SearxEngineAPIException

about = {
    "website": 'https://core.ac.uk',
    "wikidata_id": 'Q22661180',
    "official_api_documentation": 'https://core.ac.uk/documentation/api/',
    "use_official_api": True,
    "require_api_key": True,
    "results": 'JSON',
}

categories = ['science']
paging = True
nb_per_page = 10

# the value is replaced by the API key from the engine's settings.yml entry
api_key = 'unset'

base_url = 'https://core.ac.uk:443/api-v2/search/'
search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'
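
# A sketch of a settings.yml entry activating this engine -- the shortcut and
# the key value below are placeholders, not taken from this module:
#
#   - name: core.ac.uk
#     engine: core
#     categories: science
#     shortcut: cor
#     api_key: 'your-CORE-API-key'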


def request(query, params):

    if api_key == 'unset':
        raise SearxEngineAPIException('missing CORE API key')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        nb_per_page=nb_per_page,
        page=params['pageno'],
        apikey=api_key,
    )
    params['url'] = base_url + search_path
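    # the resulting URL has the form (API key shortened), e.g. for the query
    # "gravitational waves" on page 1:
    #
    #   https://core.ac.uk:443/api-v2/search/q=gravitational+waves?page=1&pageSize=10&apiKey=...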

    # 'logger' is not defined in this module; the searx engine loader injects
    # a per-engine logger into the module's namespace at load time
    logger.debug("query_url --> %s", params['url'])
    return params


def response(resp):
    results = []
    json_data = loads(resp.text)

    for result in json_data['data']:

        source = result['_source']
        # ignore records the API delivers without any URL
        if not source['urls']:
            continue

        # CORE reports Unix timestamps in milliseconds
        time = source['publishedDate'] or source['depositedDate']
        if time:
            date = datetime.fromtimestamp(time / 1000)
        else:
            date = None

        # compose a "publisher / first topic / DOI" line; publisher strings of
        # three characters or less are treated as junk and dropped
        metadata = []
        if source['publisher'] and len(source['publisher']) > 3:
            metadata.append(source['publisher'])
        if source['topics']:
            metadata.append(source['topics'][0])
        if source['doi']:
            metadata.append(source['doi'])
        metadata = ' / '.join(metadata)

        results.append(
            {
                # upgrade plain-HTTP links to HTTPS
                'url': source['urls'][0].replace('http://', 'https://', 1),
                'title': source['title'],
                'content': source['description'],
                'publishedDate': date,
                'metadata': metadata,
            }
        )

    return results
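

# ----------------------------------------------------------------------------
# A minimal offline sketch of how ``response()`` consumes the API payload.
# The JSON below is a hypothetical, abbreviated ``data[]._source`` record --
# not real CORE output -- and the stub class only mimics the ``text``
# attribute of the HTTP response object searx hands in.
# ----------------------------------------------------------------------------

if __name__ == '__main__':

    class _StubResponse:  # hypothetical stand-in for the HTTP response
        text = """
        {"data": [{"_source": {
            "urls": ["http://core.ac.uk/example"],
            "publishedDate": 1577836800000,
            "depositedDate": null,
            "publisher": "Example Press",
            "topics": ["physics"],
            "doi": "10.1000/example",
            "title": "An example record",
            "description": "An example abstract."}}]}
        """

    for res in response(_StubResponse()):
        print(res['publishedDate'], res['title'], '-->', res['url'])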