author     Guilhem Bonnefille <guilhem.bonnefille@gmail.com>  2016-04-03 22:03:41 +0200
committer  Guilhem Bonnefille <guilhem.bonnefille@gmail.com>  2016-04-03 22:03:41 +0200
commit     cf09b500f35fd1bca3fc9cc853bd7ea932220e4e (patch)
tree       bc0b56378caed891646d8d35457d1871edb409bd /searx
parent     5cbe4c53329a1fd2b949660fda25ff7a4ce6f254 (diff)
download   searxng-cf09b500f35fd1bca3fc9cc853bd7ea932220e4e.tar.gz
           searxng-cf09b500f35fd1bca3fc9cc853bd7ea932220e4e.zip
Add support for dokuwiki engine
Diffstat (limited to 'searx')
-rw-r--r--  searx/engines/doku.py  83
1 file changed, 83 insertions(+), 0 deletions(-)
diff --git a/searx/engines/doku.py b/searx/engines/doku.py
new file mode 100644
index 000000000..18abe75e5
--- /dev/null
+++ b/searx/engines/doku.py
@@ -0,0 +1,83 @@
+# Doku Wiki
+#
+# @website https://www.dokuwiki.org/
+# @provide-api yes
+# (https://www.dokuwiki.org/devel:xmlrpc)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+# Doku is OpenSearch compatible
+base_url = 'http://localhost:8090'
+search_url = '/?do=search'\
+             '&{query}'
+# TODO '&startRecord={offset}'\
+# TODO '&maximumRecords={limit}'\
+
+# do search-request
+def request(query, params):
+
+    params['url'] = base_url +\
+        search_url.format(query=urlencode({'id': query}))
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+
+    doc = fromstring(resp.text)
+
+    # parse results
+    # Quickhits
+    for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+        try:
+            res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+        except:
+            continue
+
+        if not res_url:
+            continue
+
+        title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+
+        # append result
+        results.append({'title': title,
+                        'content': "",
+                        'url': base_url + res_url})
+
+    # Search results
+    for r in doc.xpath('//dl[@class="search_results"]/*'):
+        try:
+            if r.tag == "dt":
+                res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+                title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+            elif r.tag == "dd":
+                content = extract_text(r.xpath('.'))
+
+                # append result
+                results.append({'title': title,
+                                'content': content,
+                                'url': base_url + res_url})
+        except:
+            continue
+
+        if not res_url:
+            continue
+
+    # return results
+    return results
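
For reference, here is a minimal standalone sketch of what the new engine does, handy for checking the XPath selectors without a live wiki: it builds the search URL the same way request() does and runs the quick-hit selector over a canned HTML fragment. It only needs lxml and the Python 2 environment the module above targets; the HTML fragment and the localhost base_url are illustrative assumptions, not output captured from a real DokuWiki instance.

from urllib import urlencode  # Python 2, matching the engine module
from lxml.html import fromstring

base_url = 'http://localhost:8090'  # assumed local DokuWiki, as in the engine default

# Build the search URL the way request() does:
# urlencode({'id': query}) yields 'id=wiki+syntax'.
query = 'wiki syntax'
url = base_url + '/?do=search&' + urlencode({'id': query})
print(url)  # http://localhost:8090/?do=search&id=wiki+syntax

# Canned "quick hits" markup in the structure the parser expects.
html = '''
<div class="search_quickresult">
  <ul>
    <li><a class="wikilink1" href="/doku.php?id=wiki:syntax"
           title="wiki:syntax">wiki:syntax</a></li>
  </ul>
</div>
'''

doc = fromstring(html)
for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
    res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
    title = r.xpath('.//a[@class="wikilink1"]/@title')[-1]
    print(title + ' -> ' + base_url + res_url)
# wiki:syntax -> http://localhost:8090/doku.php?id=wiki:syntax

The real engine additionally runs the title through extract_text() and stores an empty 'content' field for quick hits; the loop above only demonstrates the raw selectors the parser relies on.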