summaryrefslogtreecommitdiff
path: root/searx/engines/semantic_scholar.py
blob: bda7310477a17425f0a8d70693793bddc803acf2 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Semantic Scholar (Science)
"""

from json import dumps, loads
from datetime import datetime

# Engine metadata shown on the searx "about"/preferences pages.
about = {
    "website": 'https://www.semanticscholar.org/',
    "wikidata_id": 'Q22908627',
    "official_api_documentation": 'https://api.semanticscholar.org/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

# The search endpoint supports page-based pagination (see `pageno` in request()).
paging = True
# Search endpoint on semanticscholar.org (NOTE(review): this is the site's own
# endpoint, not api.semanticscholar.org — confirm it still matches the
# "use_official_api" claim above).
search_url = 'https://www.semanticscholar.org/api/1/search'
# Base URL used to build a canonical paper link when a result carries no URL.
paper_url = 'https://www.semanticscholar.org/paper'


def request(query, params):
    """Build the POST request for the Semantic Scholar search endpoint.

    Fills in ``params`` (url, method, JSON body) for the given ``query``
    and the page number already stored in ``params['pageno']``, then
    returns the mutated ``params`` dict.
    """
    payload = {
        "queryString": query,
        "page": params['pageno'],
        "pageSize": 10,
        "sort": "relevance",
        "useFallbackRankerService": False,
        "useFallbackSearchCluster": False,
        "getQuerySuggestions": False,
        "authors": [],
        "coAuthors": [],
        "venues": [],
        "performTitleMatch": True,
    }

    params['method'] = 'POST'
    params['url'] = search_url
    params['headers']['content-type'] = 'application/json'
    params['data'] = dumps(payload)

    return params


def response(resp):
    """Parse the Semantic Scholar JSON response into searx result dicts.

    Each result dict contains ``url``, ``title`` and ``content``, plus an
    optional ``metadata`` string (fields of study and venue) and an optional
    ``publishedDate`` datetime.
    """
    res = loads(resp.text)
    results = []

    for result in res['results']:
        item = {}

        # Pick the best available link: primary link, then the first plain
        # link, then the first alternate link, finally the canonical paper
        # page built from the result id.  `or {}` / `or []` also guards
        # against keys that are present but explicitly null in the JSON.
        url = (result.get('primaryPaperLink') or {}).get('url')
        if not url and result.get('links'):
            url = result['links'][0]
        if not url:
            alternate_links = result.get('alternatePaperLinks') or []
            if alternate_links:
                url = alternate_links[0].get('url')
        if not url:
            url = paper_url + '/%s' % result['id']

        item['url'] = url

        # Use defensive lookups: title/abstract may be missing or null,
        # which previously raised KeyError/AttributeError.
        item['title'] = (result.get('title') or {}).get('text', '')
        item['content'] = (result.get('paperAbstract') or {}).get('text', '')

        # Copy the list so appending the venue does not mutate the parsed
        # response in place.
        metadata = list(result.get('fieldsOfStudy') or [])
        venue = (result.get('venue') or {}).get('text')
        if venue:
            metadata.append(venue)
        if metadata:
            item['metadata'] = ', '.join(metadata)

        pubDate = result.get('pubDate')
        if pubDate:
            # API dates look like "2020-01-02"; a malformed value raises
            # ValueError just as before.
            item['publishedDate'] = datetime.strptime(pubDate, "%Y-%m-%d")

        results.append(item)

    return results