path: root/searx/engines/vimeo.py
blob: d46468d8d37d369b30c5d4c3810896c6bae3572f
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Wikipedia (Web
"""

from urllib.parse import urlencode
from json import loads
from dateutil import parser

from searx.utils import extr

# about
about = {
    "website": 'https://vimeo.com/',
    "wikidata_id": 'Q156376',
    "official_api_documentation": 'http://developer.vimeo.com/api',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos']
paging = True

# search-url
base_url = 'https://vimeo.com/'
search_url = base_url + 'search/page:{pageno}?{query}'


# do search-request
def request(query, params):
    params['url'] = search_url.format(pageno=params['pageno'], query=urlencode({'q': query}))

    return params


# get response from search-request
def response(resp):
    results = []

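    # The search page is assumed to embed its result set as a JSON blob in a
    # <script> tag ("var data = {...};"); extr() returns the text between the
    # two markers, which is then parsed with json.loads.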
    data = loads(extr(resp.text, 'var data = ', ';\n'))

    # parse results
    for result in data['filtered']['data']:
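        # each entry is wrapped under a key named after its type (e.g. 'clip');
        # unwrap it to get the actual video record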
        result = result[result['type']]
        videoid = result['uri'].split('/')[-1]
        url = base_url + videoid
        title = result['name']
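        # the last entry of 'sizes' is assumed to be the largest thumbnail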
        thumbnail = result['pictures']['sizes'][-1]['link']
        publishedDate = parser.parse(result['created_time'])

        # append result
        results.append(
            {
                'url': url,
                'title': title,
                'content': '',
                'template': 'videos.html',
                'publishedDate': publishedDate,
                'iframe_src': "https://player.vimeo.com/video/" + videoid,
                'thumbnail': thumbnail,
            }
        )

    # return results
    return results