summaryrefslogtreecommitdiff
path: root/searx/engines/peertube.py
blob: 1ace14027ad86d88c0eafb1f21b23ca870ac9e31 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 peertube (Videos)
"""

from json import loads
from datetime import datetime
from urllib.parse import urlencode
from searx.utils import html_to_text

# about
about = {
    "website": 'https://joinpeertube.org',
    "wikidata_id": 'Q50938515',
    "official_api_documentation": 'https://docs.joinpeertube.org/api-rest-reference.html',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

# engine dependent config
categories = ["videos"]
# the engine supports paging through the search results
paging = True
# default PeerTube instance queried; any instance exposing the REST API works
base_url = "https://peer.tube"
# Vue component of the framasoft search index; scraped by
# _fetch_supported_languages() to extract the language codes PeerTube knows
supported_languages_url = (
    'https://framagit.org/framasoft/peertube/search-index/-/raw/master/client/src/views/Search.vue'
)


# do search-request
def request(query, params):
    """Build the PeerTube search request URL.

    Reads ``params["pageno"]`` and ``params["language"]`` and stores the
    final request URL in ``params["url"]``.

    :param query: the search term
    :param params: searx request parameters dict
    :returns: the (mutated) ``params`` dict
    """
    sanitized_url = base_url.rstrip("/")
    # The PeerTube REST API paginates with a zero-based ``start`` offset
    # (15 results per page), not a page number — the previous ``pageno``
    # query parameter was ignored by the server, so every page returned
    # the same first page of results.
    start = (params["pageno"] - 1) * 15
    search_url = sanitized_url + "/api/v1/search/videos/?start={start}&{query}"
    query_dict = {"search": query}
    # searx language tags look like "en-US"; PeerTube expects the bare code
    language = params["language"].split("-")[0]
    if "all" != language and language in supported_languages:
        query_dict["languageOneOf"] = language
    params["url"] = search_url.format(query=urlencode(query_dict), start=start)
    return params


def _get_offset_from_pageno(pageno):
    return (pageno - 1) * 15 + 1


# get response from search-request
def response(resp):
    """Parse the PeerTube search API JSON response into searx results.

    :param resp: HTTP response whose body is the JSON returned by
        ``/api/v1/search/videos``
    :returns: list of result dicts using the ``videos.html`` template
    """
    sanitized_url = base_url.rstrip("/")
    results = []

    search_res = loads(resp.text)

    embedded_url = (
        '<iframe width="560" height="315" sandbox="allow-same-origin allow-scripts allow-popups" '
        + 'src="'
        + sanitized_url
        + '{embed_path}" frameborder="0" allowfullscreen></iframe>'
    )
    # return empty array if there are no results
    if "data" not in search_res:
        return []

    # parse results
    for res in search_res["data"]:
        title = res["name"]
        url = sanitized_url + "/videos/watch/" + res["uuid"]
        # description may be null — or entirely absent — in the API response;
        # .get() avoids a KeyError that res["description"] would raise
        description = res.get("description")
        content = html_to_text(description) if description else ""
        thumbnail = sanitized_url + res["thumbnailPath"]
        # e.g. "2021-02-03T14:25:06.000Z" — naive datetime, assumed UTC by searx
        publishedDate = datetime.strptime(res["publishedAt"], "%Y-%m-%dT%H:%M:%S.%fZ")
        embedded = embedded_url.format(embed_path=res["embedPath"])

        results.append(
            {
                "template": "videos.html",
                "url": url,
                "title": title,
                "content": content,
                "publishedDate": publishedDate,
                "embedded": embedded,
                "thumbnail": thumbnail,
            }
        )

    # return results
    return results


def _fetch_supported_languages(resp):
    import re

    # https://docs.python.org/3/howto/regex.html#greedy-versus-non-greedy
    videolanguages = re.search(r"videoLanguages \(\)[^\n]+(.*?)\]", resp.text, re.DOTALL)
    peertube_languages = [m.group(1) for m in re.finditer(r"\{ id: '([a-z]+)', label:", videolanguages.group(1))]
    return peertube_languages