summaryrefslogtreecommitdiff
path: root/searx/engines/annas_archive.py
blob: c845d67c6d4a9c8c654d224feee8cce78c3a4618 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Anna's Archive

"""
from typing import List, Dict, Any, Optional
from urllib.parse import quote
from lxml import html

from searx.utils import extract_text, eval_xpath, eval_xpath_list
from searx.enginelib.traits import EngineTraits

# Engine metadata displayed by SearXNG (preferences / engine stats).
about: Dict[str, Any] = {
    "website": "https://annas-archive.org/",
    "wikidata_id": "Q115288326",
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# engine dependent config
categories: List[str] = ["files"]  # result category this engine contributes to
paging: bool = False  # this implementation does not send a page argument

# search-url
base_url: str = "https://annas-archive.org"

# xpath queries
# A result on the search page is an <a> card whose href starts with "/md5";
# the remaining expressions extract fields relative to one such card.
xpath_results: str = '//main//a[starts-with(@href,"/md5")]'
xpath_url: str = ".//@href"
xpath_title: str = ".//h3/text()[1]"
xpath_authors: str = './/div[contains(@class, "italic")]'  # authors line (italic styling)
xpath_publisher: str = './/div[contains(@class, "text-sm")]'
xpath_file_info: str = './/div[contains(@class, "text-xs")]'  # format / size line


def request(query, params: Dict[str, Any]) -> Dict[str, Any]:
    """Assemble the Anna's Archive search URL into ``params['url']``.

    The ``lang`` argument is left empty when SearXNG's language is ``all``.
    """
    lang: str = params["language"] if params["language"] != "all" else ""
    params["url"] = (base_url + "/search?q={search_query}&lang={lang}").format(
        search_query=quote(query), lang=lang
    )
    return params


def response(resp) -> List[Dict[str, Optional[str]]]:
    """Parse the search page and return a list of result dicts."""
    dom = html.fromstring(resp.text)
    results: List[Dict[str, Optional[str]]] = []

    for entry in dom.xpath(xpath_results):
        results.append(
            {
                "url": base_url + entry.xpath(xpath_url)[0],
                "title": extract_text(eval_xpath(entry, xpath_title)),
                "content": "{publisher}. {authors}. {file_info}".format(
                    authors=extract_text(eval_xpath(entry, xpath_authors)),
                    publisher=extract_text(eval_xpath(entry, xpath_publisher)),
                    file_info=extract_text(eval_xpath(entry, xpath_file_info)),
                ),
            }
        )

    return results


def fetch_traits(engine_traits: EngineTraits):
    """Fetch languages and other search arguments from Anna's search form."""
    # pylint: disable=import-outside-toplevel

    import babel
    from searx.network import get  # see https://github.com/searxng/searxng/issues/762
    from searx.locales import language_tag

    engine_traits.all_locale = ''
    for trait_name in ('content', 'ext', 'sort'):
        engine_traits.custom[trait_name] = []

    resp = get(base_url + '/search')
    if not resp.ok:  # type: ignore
        raise RuntimeError("Response from Anna's search page is not OK.")
    dom = html.fromstring(resp.text)  # type: ignore

    # supported language codes from the search form's <select name="lang">
    lang_map = {}
    for option in eval_xpath_list(dom, "//form//select[@name='lang']//option"):
        eng_lang = option.get("value")
        # skip placeholder / non-language entries
        if eng_lang in ('', '_empty', 'nl-BE', 'und'):
            continue
        try:
            locale = babel.Locale.parse(lang_map.get(eng_lang, eng_lang), sep='-')
        except babel.UnknownLocaleError:
            # silently ignore unknown languages
            continue
        sxng_lang = language_tag(locale)
        conflict = engine_traits.languages.get(sxng_lang)
        if conflict:
            # first mapping for a SearXNG language tag wins
            if conflict != eng_lang:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_lang, conflict, eng_lang))
            continue
        engine_traits.languages[sxng_lang] = eng_lang

    # remaining form selects map 1:1 onto the custom trait lists
    for trait_name in ('content', 'ext', 'sort'):
        select_xpath = "//form//select[@name='%s']//option" % trait_name
        for option in eval_xpath_list(dom, select_xpath):
            engine_traits.custom[trait_name].append(option.get("value"))