path: root/searx/engines/wikipedia.py
"""
 Wikipedia (Web)

 @website     https://{language}.wikipedia.org
 @provide-api yes

 @using-api   yes
 @results     JSON
 @stable      yes
 @parse       url, infobox
"""

from json import loads
from urllib import urlencode, quote

supported_languages = ["en", "sv", "ceb", "de", "nl", "fr", "ru", "it", "es", "war",
                       "pl", "vi", "ja", "pt", "zh", "uk", "ca", "fa", "no", "sh",
                       "ar", "fi", "hu", "id", "ro", "cs", "ko", "sr", "ms", "tr",
                       "eu", "eo", "min", "bg", "da", "kk", "sk", "hy", "he", "zh-min-nan",
                       "lt", "hr", "sl", "et", "ce", "gl", "nn", "uz", "la", "vo",
                       "el", "simple", "be", "az", "th", "ur", "ka", "hi", "oc", "ta",
                       "mk", "mg", "new", "lv", "cy", "bs", "tt", "tl", "te", "pms",
                       "be-tarask", "br", "sq", "ky", "ht", "jv", "tg", "ast", "zh-yue", "lb",
                       "mr", "ml", "bn", "pnb", "is", "af", "sco", "ga", "ba", "fy",
                       "cv", "lmo", "sw", "my", "an", "yo", "ne", "io", "gu", "nds",
                       "scn", "bpy", "pa", "ku", "als", "kn", "bar", "ia", "qu", "su",
                       "ckb", "bat-smg", "mn", "arz", "nap", "wa", "bug", "gd", "yi", "map-bms",
                       "am", "mzn", "fo", "si", "nah", "li", "sah", "vec", "hsb", "or",
                       "os", "mrj", "sa", "hif", "mhr", "roa-tara", "azb", "pam", "ilo",
                       "sd", "ps", "se", "mi", "bh", "eml", "bcl", "xmf", "diq", "hak",
                       "gan", "glk", "vls", "nds-nl", "rue", "bo", "fiu-vro", "co", "sc",
                       "tk", "csb", "lrc", "vep", "wuu", "km", "szl", "gv", "crh", "kv",
                       "zh-classical", "frr", "zea", "as", "so", "kw", "nso", "ay", "stq",
                       "udm", "cdo", "nrm", "ie", "koi", "rm", "pcd", "myv", "mt", "fur",
                       "ace", "lad", "gn", "lij", "dsb", "dv", "cbk-zam", "ext", "gom",
                       "kab", "ksh", "ang", "mai", "mwl", "lez", "gag", "ln", "ug", "pi",
                       "pag", "frp", "sn", "nv", "av", "pfl", "haw", "xal", "krc", "kaa",
                       "rw", "bxr", "pdc", "to", "kl", "nov", "arc", "kbd", "lo", "bjn",
                       "pap", "ha", "tet", "ki", "tyv", "tpi", "na", "lbe", "ig", "jbo",
                       "roa-rup", "ty", "jam", "za", "kg", "mdf", "lg", "wo", "srn", "ab",
                       "ltg", "zu", "sm", "chr", "om", "tn", "chy", "rmy", "cu", "tw", "tum",
                       "xh", "bi", "rn", "pih", "got", "ss", "pnt", "bm", "ch", "mo", "ts",
                       "ady", "iu", "st", "ee", "ny", "fj", "ks", "ak", "ik", "sg", "ve",
                       "dz", "ff", "ti", "cr", "ng", "cho", "kj", "mh", "ho", "ii", "aa", "mus", "hz", "kr"]

# search-url
base_url = 'https://{language}.wikipedia.org/'
search_postfix = 'w/api.php?'\
    'action=query'\
    '&format=json'\
    '&{query}'\
    '&prop=extracts|pageimages'\
    '&exintro'\
    '&explaintext'\
    '&pithumbsize=300'\
    '&redirects'
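
# Notes on the query parameters above: prop=extracts together with exintro and
# explaintext asks the MediaWiki API for the article intro as plain text,
# prop=pageimages with pithumbsize=300 adds a 300px thumbnail, and redirects
# makes the API follow page redirects before answering.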


# set language in base_url
def url_lang(lang):
    lang = lang.split('-')[0]
    if lang == 'all' or lang not in supported_languages:
        language = 'en'
    else:
        language = lang

    return base_url.format(language=language)
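
# Illustrative behaviour (not exhaustive):
#   url_lang('fr-FR') -> 'https://fr.wikipedia.org/'
#   url_lang('all')   -> 'https://en.wikipedia.org/'  (the searx "all languages" value)
#   url_lang('xx')    -> 'https://en.wikipedia.org/'  (unsupported code falls back)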


# do search-request
def request(query, params):
    if query.islower():
        query += '|' + query.title()

    params['url'] = url_lang(params['language']) \
        + search_postfix.format(query=urlencode({'titles': query}))

    return params
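
# Example (a sketch): request('new york', {'language': 'en-US'}) sets
# params['url'] to
#   https://en.wikipedia.org/w/api.php?action=query&format=json
#   &titles=new+york%7CNew+York&prop=extracts|pageimages&exintro&explaintext
#   &pithumbsize=300&redirects
# (wrapped here for readability).  The lowercase query is doubled with its
# title-cased form, presumably so that lowercase user input still matches the
# capitalised article title.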


# get the first meaningful paragraph
# this should filter out disambiguation pages and notes above the first paragraph
# the "magic numbers" were obtained by fine-tuning
def extract_first_paragraph(content, title, image):
    first_paragraph = None

    failed_attempts = 0
    for paragraph in content.split('\n'):

        starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)
        length = len(paragraph)

        if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):
            first_paragraph = paragraph
            break

        failed_attempts += 1
        if failed_attempts > 3:
            return None

    return first_paragraph
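
# Illustrative behaviour (a sketch with made-up extract text): for an extract
# whose first line is a hatnote such as
#   "This article is about the city. For other uses, see Paris (disambiguation)."
# followed by a long paragraph beginning "Paris is the capital and most
# populous city of France. ...", the hatnote is skipped and the long paragraph
# is returned; after the fourth unsuitable paragraph the function gives up and
# returns None.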


# get response from search-request
def response(resp):
    results = []

    search_result = loads(resp.content)
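
    # Shape of the data read below (a trimmed sketch; real responses carry
    # more fields):
    #   {"query": {"pages": {
    #       "123456": {"pageid": 123456, "title": "Paris",
    #                  "extract": "Paris is the capital ...",
    #                  "thumbnail": {"source": "https://upload.wikimedia.org/..."}},
    #       "-1": {"missing": ""}}}}
    # Titles that do not exist come back under negative ids such as "-1".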

    # a wikipedia article's unique id
    # the first valid (positive) id is assumed to be the requested article
    for article_id in search_result['query']['pages']:
        page = search_result['query']['pages'][article_id]
        if int(article_id) > 0:
            break

    if int(article_id) < 0:
        return []

    title = page.get('title')

    image = page.get('thumbnail')
    if image:
        image = image.get('source')

    extract = page.get('extract')

    summary = extract_first_paragraph(extract, title, image)
    if not summary:
        return []

    # link to wikipedia article
    wikipedia_link = url_lang(resp.search_params['language']) \
        + 'wiki/' + quote(title.replace(' ', '_').encode('utf8'))

    results.append({'url': wikipedia_link, 'title': title})

    results.append({'infobox': title,
                    'id': wikipedia_link,
                    'content': summary,
                    'img_src': image,
                    'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})

    return results