author    | Adam Tauber <asciimoo@gmail.com> | 2017-11-01 21:27:57 +0100
committer | GitHub <noreply@github.com> | 2017-11-01 21:27:57 +0100
commit    | 3d50b0288dc2ba42baf550353f3fb5bee6462754 (patch)
tree      | 3931e34b8501cd58db58cb651b0193ee46c23eb8
parent    | 6d28e9d6945b5510b3d861e20521554435a10f63 (diff)
parent    | 5954a8e16a64a369072a7487f62b6396a451ae5f (diff)
Merge pull request #1075 from kvch/finish-jibe-b-engines
Finish PRs of @jibe-b: pubmed, oa_doi_rewrite, openaire, arxiv
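
The new arxiv engine pages through the arXiv Atom export API using a zero-based start offset. A minimal standalone sketch of the URL it builds, assuming only the standard library (names mirror searx/engines/arxiv.py in the diff below; the quote() call is an extra safety step the engine itself omits):

    # Sketch: reproduce the query URL the arxiv engine formats.
    try:
        from urllib.parse import quote  # Python 3
    except ImportError:
        from urllib import quote        # Python 2 (searx supported both)

    base_url = ('http://export.arxiv.org/api/query?search_query=all:'
                '{query}&start={offset}&max_results={number_of_results}')

    def arxiv_query_url(query, pageno=1, number_of_results=10):
        # pageno is 1-based, the API offset is 0-based
        offset = (pageno - 1) * number_of_results
        return base_url.format(query=quote(query),
                               offset=offset,
                               number_of_results=number_of_results)

    # arxiv_query_url('electron', pageno=2)
    # -> 'http://export.arxiv.org/api/query?search_query=all:electron&start=10&max_results=10'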
-rw-r--r-- | searx/engines/arxiv.py                 | 76
-rwxr-xr-x | searx/engines/base.py                  |  2
-rw-r--r-- | searx/engines/pubmed.py                | 98
-rw-r--r-- | searx/plugins/__init__.py              |  4
-rw-r--r-- | searx/plugins/oa_doi_rewrite.py (renamed from searx/plugins/doai_rewrite.py) | 16
-rw-r--r-- | searx/preferences.py                   |  5
-rw-r--r-- | searx/settings.yml                     | 31
-rw-r--r-- | searx/templates/oscar/preferences.html | 12
-rw-r--r-- | searx/webapp.py                        |  8
-rw-r--r-- | tests/unit/engines/pubmed.py           | 37
-rw-r--r-- | tests/unit/engines/test_arxiv.py       | 58
-rw-r--r-- | tests/unit/engines/test_base.py        |  6
12 files changed, 343 insertions, 10 deletions
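
The centerpiece of the merge is the configurable DOI resolver: the plugin formerly hard-wired to doai.io now looks the resolver up in a new doi_resolvers map in settings.yml, honouring a doi_resolver URL argument or the saved preference and falling back to default_doi_resolver for unknown names. A minimal standalone sketch of that flow, with a hypothetical settings dict standing in for searx's parsed settings.yml (names mirror the diff below):

    # Sketch of the resolver selection plus URL rewrite added below in
    # searx/plugins/oa_doi_rewrite.py; `settings` is a stand-in here.
    import re

    settings = {
        'doi_resolvers': {
            'oadoi.org': 'https://oadoi.org/',
            'doi.org': 'https://doi.org/',
            'doai.io': 'http://doai.io/',
        },
        'default_doi_resolver': 'oadoi.org',
    }

    DOI_REGEX = re.compile(r'10\.\d{4,9}/[^\s]+')

    def pick_resolver(requested):
        # Unknown resolver names fall back to the configured default.
        if requested not in settings['doi_resolvers']:
            requested = settings['default_doi_resolver']
        return settings['doi_resolvers'][requested]

    def rewrite_to_open_access(url_path, requested='oadoi.org'):
        match = DOI_REGEX.search(url_path)
        if match is None:
            return None
        return pick_resolver(requested) + match.group(0)

    # rewrite_to_open_access('/doi/10.1000/xyz123', 'nonsense')
    # -> 'https://oadoi.org/10.1000/xyz123'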
diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py
new file mode 100644
index 000000000..5ef84f0c1
--- /dev/null
+++ b/searx/engines/arxiv.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+"""
+ ArXiV (Scientific preprints)
+ @website     https://arxiv.org
+ @provide-api yes (export.arxiv.org/api/query)
+ @using-api   yes
+ @results     XML-RSS
+ @stable      yes
+ @parse       url, title, publishedDate, content
+ More info on api: https://arxiv.org/help/api/user-manual
+"""
+
+from lxml import html
+from datetime import datetime
+from searx.url_utils import urlencode
+
+
+categories = ['science']
+
+base_url = 'http://export.arxiv.org/api/query?search_query=all:'\
+           + '{query}&start={offset}&max_results={number_of_results}'
+
+# engine dependent config
+number_of_results = 10
+
+
+def request(query, params):
+    # basic search
+    offset = (params['pageno'] - 1) * number_of_results
+
+    string_args = dict(query=query,
+                       offset=offset,
+                       number_of_results=number_of_results)
+
+    params['url'] = base_url.format(**string_args)
+
+    return params
+
+
+def response(resp):
+    results = []
+
+    dom = html.fromstring(resp.content)
+    search_results = dom.xpath('//entry')
+
+    for entry in search_results:
+        title = entry.xpath('.//title')[0].text
+
+        url = entry.xpath('.//id')[0].text
+
+        content_string = '{doi_content}{abstract_content}'
+
+        abstract = entry.xpath('.//summary')[0].text
+
+        # If a doi is available, add it to the snippet
+        try:
+            doi_content = entry.xpath('.//link[@title="doi"]')[0].text
+            content = content_string.format(doi_content=doi_content, abstract_content=abstract)
+        except:
+            content = content_string.format(doi_content="", abstract_content=abstract)
+
+        if len(content) > 300:
+            content = content[0:300] + "..."
+        # TODO: center snippet on query term
+
+        publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')
+
+        res_dict = {'url': url,
+                    'title': title,
+                    'publishedDate': publishedDate,
+                    'content': content}
+
+        results.append(res_dict)
+
+    return results
diff --git a/searx/engines/base.py b/searx/engines/base.py
index ff006a3bc..be0b7d247 100755
--- a/searx/engines/base.py
+++ b/searx/engines/base.py
@@ -73,7 +73,7 @@ def request(query, params):
 def response(resp):
     results = []
 
-    search_results = etree.XML(resp.text)
+    search_results = etree.XML(resp.content)
 
     for entry in search_results.xpath('./result/doc'):
         content = "No description available"
diff --git a/searx/engines/pubmed.py b/searx/engines/pubmed.py
new file mode 100644
index 000000000..6451f1467
--- /dev/null
+++ b/searx/engines/pubmed.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+"""
+ PubMed (Scholar publications)
+ @website     https://www.ncbi.nlm.nih.gov/pubmed/
+ @provide-api yes (https://www.ncbi.nlm.nih.gov/home/develop/api/)
+ @using-api   yes
+ @results     XML
+ @stable      yes
+ @parse       url, title, publishedDate, content
+ More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/
+"""
+
+from flask_babel import gettext
+from lxml import etree
+from datetime import datetime
+from searx.url_utils import urlencode
+from searx.poolrequests import get
+
+
+categories = ['science']
+
+base_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'\
+           + '?db=pubmed&{query}&retstart={offset}&retmax={hits}'
+
+# engine dependent config
+number_of_results = 10
+pubmed_url = 'https://www.ncbi.nlm.nih.gov/pubmed/'
+
+
+def request(query, params):
+    # basic search
+    offset = (params['pageno'] - 1) * number_of_results
+
+    string_args = dict(query=urlencode({'term': query}),
+                       offset=offset,
+                       hits=number_of_results)
+
+    params['url'] = base_url.format(**string_args)
+
+    return params
+
+
+def response(resp):
+    results = []
+
+    # First retrieve notice of each result
+    pubmed_retrieve_api_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?'\
+                              + 'db=pubmed&retmode=xml&id={pmids_string}'
+
+    pmids_results = etree.XML(resp.content)
+    pmids = pmids_results.xpath('//eSearchResult/IdList/Id')
+    pmids_string = ''
+
+    for item in pmids:
+        pmids_string += item.text + ','
+
+    retrieve_notice_args = dict(pmids_string=pmids_string)
+
+    retrieve_url_encoded = pubmed_retrieve_api_url.format(**retrieve_notice_args)
+
+    search_results_xml = get(retrieve_url_encoded).content
+    search_results = etree.XML(search_results_xml).xpath('//PubmedArticleSet/PubmedArticle/MedlineCitation')
+
+    for entry in search_results:
+        title = entry.xpath('.//Article/ArticleTitle')[0].text
+
+        pmid = entry.xpath('.//PMID')[0].text
+        url = pubmed_url + pmid
+
+        try:
+            content = entry.xpath('.//Abstract/AbstractText')[0].text
+        except:
+            content = gettext('No abstract is available for this publication.')
+
+        # If a doi is available, add it to the snippet
+        try:
+            doi = entry.xpath('.//ELocationID[@EIdType="doi"]')[0].text
+            content = 'DOI: {doi} Abstract: {content}'.format(doi=doi, content=content)
+        except:
+            pass
+
+        if len(content) > 300:
+            content = content[0:300] + "..."
+        # TODO: center snippet on query term
+
+        publishedDate = datetime.strptime(entry.xpath('.//DateCreated/Year')[0].text
+                                          + '-' + entry.xpath('.//DateCreated/Month')[0].text
+                                          + '-' + entry.xpath('.//DateCreated/Day')[0].text, '%Y-%m-%d')
+
+        res_dict = {'url': url,
+                    'title': title,
+                    'publishedDate': publishedDate,
+                    'content': content}
+
+        results.append(res_dict)
+
+    return results
diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py
index 46c1f8918..4dbcbbd28 100644
--- a/searx/plugins/__init__.py
+++ b/searx/plugins/__init__.py
@@ -22,7 +22,7 @@ if version_info[0] == 3:
 
 logger = logger.getChild('plugins')
 
-from searx.plugins import (doai_rewrite,
+from searx.plugins import (oa_doi_rewrite,
                            https_rewrite,
                            infinite_scroll,
                            open_results_on_new_tab,
@@ -78,7 +78,7 @@ class PluginStore():
 
 plugins = PluginStore()
 
-plugins.register(doai_rewrite)
+plugins.register(oa_doi_rewrite)
 plugins.register(https_rewrite)
 plugins.register(infinite_scroll)
 plugins.register(open_results_on_new_tab)
diff --git a/searx/plugins/doai_rewrite.py b/searx/plugins/oa_doi_rewrite.py
index b7b814fac..b62ef0b1e 100644
--- a/searx/plugins/doai_rewrite.py
+++ b/searx/plugins/oa_doi_rewrite.py
@@ -1,14 +1,18 @@
 from flask_babel import gettext
 import re
 from searx.url_utils import urlparse, parse_qsl
+from searx import settings
+
 
 regex = re.compile(r'10\.\d{4,9}/[^\s]+')
 
-name = gettext('DOAI rewrite')
+name = gettext('Open Access DOI rewrite')
 description = gettext('Avoid paywalls by redirecting to open-access versions of publications when available')
 default_on = False
 preference_section = 'privacy'
 
+doi_resolvers = settings['doi_resolvers']
+
 
 def extract_doi(url):
     match = regex.search(url.path)
@@ -21,12 +25,20 @@ def extract_doi(url):
     return None
 
 
+def get_doi_resolver(args, preference_doi_resolver):
+    doi_resolvers = settings['doi_resolvers']
+    doi_resolver = args.get('doi_resolver', preference_doi_resolver)[0]
+    if doi_resolver not in doi_resolvers:
+        doi_resolver = settings['default_doi_resolver']
+    return doi_resolver
+
+
 def on_result(request, search, result):
     doi = extract_doi(result['parsed_url'])
     if doi and len(doi) < 50:
         for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):
             if doi.endswith(suffix):
                 doi = doi[:-len(suffix)]
-        result['url'] = 'http://doai.io/' + doi
+        result['url'] = doi_resolvers[get_doi_resolver(request.args, request.preferences.get_value('doi_resolver'))] + doi
         result['parsed_url'] = urlparse(result['url'])
     return True
diff --git a/searx/preferences.py b/searx/preferences.py
index c2c649eea..1a143db6b 100644
--- a/searx/preferences.py
+++ b/searx/preferences.py
@@ -15,6 +15,7 @@ LANGUAGE_CODES = [l[0] for l in languages]
 LANGUAGE_CODES.append('all')
 DISABLED = 0
 ENABLED = 1
+DOI_RESOLVERS = list(settings['doi_resolvers'])
 
 
 class MissingArgumentException(Exception):
@@ -266,7 +267,9 @@ class Preferences(object):
             'results_on_new_tab': MapSetting(False, map={'0': False,
                                                          '1': True,
                                                          'False': False,
-                                                         'True': True})}
+                                                         'True': True}),
+            'doi_resolver': MultipleChoiceSetting(['oadoi.org'], choices=DOI_RESOLVERS),
+        }
 
         self.engines = EnginesSetting('engines', choices=engines)
         self.plugins = PluginsSetting('plugins', choices=plugins)
diff --git a/searx/settings.yml b/searx/settings.yml
index 6f44a3b8b..067a842e4 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -60,6 +60,12 @@ engines:
     disabled : True
     shortcut : ai
 
+  - name : arxiv
+    engine : arxiv
+    shortcut : arx
+    categories : science
+    timeout : 4.0
+
   - name : base
     engine : base
     shortcut : bs
@@ -409,6 +415,18 @@ engines:
     shortcut : nt
     disabled : True
 
+  - name : openaire
+    engine : json_engine
+    paging : True
+    search_url : http://api.openaire.eu/search/datasets?format=json&page={pageno}&size=10&title={query}
+    results_query : response/results/result
+    url_query : metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
+    title_query : metadata/oaf:entity/oaf:result/title/$
+    content_query : metadata/oaf:entity/oaf:result/description/$
+    categories : science
+    shortcut : oa
+    timeout: 5.0
+
   - name : openstreetmap
     engine : openstreetmap
     shortcut : osm
@@ -442,6 +460,12 @@ engines:
     url: https://pirateproxy.red/
     timeout : 3.0
 
+  - name : pubmed
+    engine : pubmed
+    shortcut : pub
+    categories: science
+    timeout : 3.0
+
   - name : qwant
     engine : qwant
     shortcut : qw
@@ -694,3 +718,10 @@ locales:
     tr : Türkçe (Turkish)
     uk : українська мова (Ukrainian)
     zh : 中文 (Chinese)
+
+doi_resolvers :
+  oadoi.org : 'https://oadoi.org/'
+  doi.org : 'https://doi.org/'
+  doai.io : 'http://doai.io/'
+
+default_doi_resolver : 'oadoi.org'
diff --git a/searx/templates/oscar/preferences.html b/searx/templates/oscar/preferences.html
index f4b2c63ea..5f85a9af6 100644
--- a/searx/templates/oscar/preferences.html
+++ b/searx/templates/oscar/preferences.html
@@ -118,6 +118,18 @@
                             <option value="0" {% if not results_on_new_tab %}selected="selected"{% endif %}>{{ _('Off')}}</option>
                         </select>
                         {{ preferences_item_footer(info, label, rtl) }}
+
+                        {% set label = _('Open Access DOI resolver') %}
+                        {% set info = _('Redirect to open-access versions of publications when available (plugin required)') %}
+                        {{ preferences_item_header(info, label, rtl) }}
+                        <select class="form-control" id='doi_resolver' name='doi_resolver'>
+                            {% for doi_resolver_name, doi_resolver_url in doi_resolvers.items() %}
+                            <option value="{{ doi_resolver_name }}" {% if doi_resolver_name == current_doi_resolver %}selected="selected"{% endif %}>
+                                {{ doi_resolver_name }} - {{ doi_resolver_url }}
+                            </option>
+                            {% endfor %}
+                        </select>
+                        {{ preferences_item_footer(info, label, rtl) }}
                     </div>
                 </fieldset>
             </div>
diff --git a/searx/webapp.py b/searx/webapp.py
index dd93395ee..f81747325 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -66,6 +66,7 @@ from searx.search import SearchWithPlugins, get_search_query_from_webapp
 from searx.query import RawTextQuery
 from searx.autocomplete import searx_bang, backends as autocomplete_backends
 from searx.plugins import plugins
+from searx.plugins.oa_doi_rewrite import get_doi_resolver
 from searx.preferences import Preferences, ValidationException
 from searx.answerers import answerers
 from searx.url_utils import urlencode, urlparse, urljoin
@@ -695,6 +696,8 @@ def preferences():
                   shortcuts={y: x for x, y in engine_shortcuts.items()},
                   themes=themes,
                   plugins=plugins,
+                  doi_resolvers=settings['doi_resolvers'],
+                  current_doi_resolver=get_doi_resolver(request.args, request.preferences.get_value('doi_resolver')),
                   allowed_plugins=allowed_plugins,
                   theme=get_current_theme_name(),
                   preferences_url_params=request.preferences.get_as_url_params(),
@@ -839,7 +842,10 @@ def config():
         'autocomplete': settings['search']['autocomplete'],
         'safe_search': settings['search']['safe_search'],
         'default_theme': settings['ui']['default_theme'],
-        'version': VERSION_STRING})
+        'version': VERSION_STRING,
+        'doi_resolvers': [r for r in settings['doi_resolvers']],
+        'default_doi_resolver': settings['default_doi_resolver'],
+    })
 
 
 @app.errorhandler(404)
diff --git a/tests/unit/engines/pubmed.py b/tests/unit/engines/pubmed.py
new file mode 100644
index 000000000..370efe067
--- /dev/null
+++ b/tests/unit/engines/pubmed.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+from collections import defaultdict
+import mock
+from searx.engines import pubmed
+from searx.testing import SearxTestCase
+
+
+class TestPubmedEngine(SearxTestCase):
+
+    def test_request(self):
+        query = 'test_query'
+        dicto = defaultdict(dict)
+        dicto['pageno'] = 1
+        params = pubmed.request(query, dicto)
+        self.assertIn('url', params)
+        self.assertIn('eutils.ncbi.nlm.nih.gov/', params['url'])
+        self.assertIn('term', params['url'])
+
+    def test_response(self):
+        self.assertRaises(AttributeError, pubmed.response, None)
+        self.assertRaises(AttributeError, pubmed.response, [])
+        self.assertRaises(AttributeError, pubmed.response, '')
+        self.assertRaises(AttributeError, pubmed.response, '[]')
+
+        response = mock.Mock(content=b'<PubmedArticleSet></PubmedArticleSet>')
+        self.assertEqual(pubmed.response(response), [])
+
+        xml_mock = """<eSearchResult><Count>1</Count><RetMax>1</RetMax><RetStart>0</RetStart><IdList>
+<Id>1</Id>
+</IdList></eSearchResult>
+"""
+
+        response = mock.Mock(content=xml_mock.encode('utf-8'))
+        results = pubmed.response(response)
+        self.assertEqual(type(results), list)
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0]['content'], 'No abstract is available for this publication.')
diff --git a/tests/unit/engines/test_arxiv.py b/tests/unit/engines/test_arxiv.py
new file mode 100644
index 000000000..b32c0e605
--- /dev/null
+++ b/tests/unit/engines/test_arxiv.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+from collections import defaultdict
+import mock
+from searx.engines import arxiv
+from searx.testing import SearxTestCase
+
+
+class TestArxivEngine(SearxTestCase):
+
+    def test_request(self):
+        query = 'test_query'
+        dicto = defaultdict(dict)
+        dicto['pageno'] = 1
+        params = arxiv.request(query, dicto)
+        self.assertIn('url', params)
+        self.assertIn('export.arxiv.org/api/', params['url'])
+
+    def test_response(self):
+        self.assertRaises(AttributeError, arxiv.response, None)
+        self.assertRaises(AttributeError, arxiv.response, [])
+        self.assertRaises(AttributeError, arxiv.response, '')
+        self.assertRaises(AttributeError, arxiv.response, '[]')
+
+        response = mock.Mock(content=b'''<?xml version="1.0" encoding="UTF-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom"></feed>''')
+        self.assertEqual(arxiv.response(response), [])
+
+        xml_mock = b'''<?xml version="1.0" encoding="UTF-8"?>
+<feed xmlns="http://www.w3.org/2005/Atom">
+  <title type="html">ArXiv Query: search_query=all:test_query&amp;id_list=&amp;start=0&amp;max_results=1</title>
+  <id>http://arxiv.org/api/1</id>
+  <updated>2000-01-21T00:00:00-01:00</updated>
+  <opensearch:totalResults xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:totalResults>
+  <opensearch:startIndex xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">0</opensearch:startIndex>
+  <opensearch:itemsPerPage xmlns:opensearch="http://a9.com/-/spec/opensearch/1.1/">1</opensearch:itemsPerPage>
+  <entry>
+    <id>http://arxiv.org/1</id>
+    <updated>2000-01-01T00:00:01Z</updated>
+    <published>2000-01-01T00:00:01Z</published>
+    <title>Mathematical proof.</title>
+    <summary>Mathematical formula.</summary>
+    <author>
+      <name>A. B.</name>
+    </author>
+    <link href="http://arxiv.org/1" rel="alternate" type="text/html"/>
+    <link title="pdf" href="http://arxiv.org/1" rel="related" type="application/pdf"/>
+    <category term="math.QA" scheme="http://arxiv.org/schemas/atom"/>
+    <category term="1" scheme="http://arxiv.org/schemas/atom"/>
+  </entry>
+</feed>
+'''
+
+        response = mock.Mock(content=xml_mock)
+        results = arxiv.response(response)
+        self.assertEqual(type(results), list)
+        self.assertEqual(len(results), 1)
+        self.assertEqual(results[0]['title'], 'Mathematical proof.')
+        self.assertEqual(results[0]['content'], 'Mathematical formula.')
diff --git a/tests/unit/engines/test_base.py b/tests/unit/engines/test_base.py
index e008b034c..b5da5bde7 100644
--- a/tests/unit/engines/test_base.py
+++ b/tests/unit/engines/test_base.py
@@ -21,10 +21,10 @@ class TestBaseEngine(SearxTestCase):
         self.assertRaises(AttributeError, base.response, '')
         self.assertRaises(AttributeError, base.response, '[]')
 
-        response = mock.Mock(text='<response></response>')
+        response = mock.Mock(content=b'<response></response>')
         self.assertEqual(base.response(response), [])
 
-        xml_mock = """<?xml version="1.0"?>
+        xml_mock = b"""<?xml version="1.0"?>
 <response>
 <lst name="responseHeader">
     <int name="status">0</int>
@@ -83,7 +83,7 @@ class TestBaseEngine(SearxTestCase):
 </result>
 </response>"""
 
-        response = mock.Mock(text=xml_mock.encode('utf-8'))
+        response = mock.Mock(content=xml_mock)
         results = base.response(response)
         self.assertEqual(type(results), list)
         self.assertEqual(len(results), 1)
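
For context on what the new pubmed engine's response() is doing, the NCBI E-utilities flow is a two-step round trip: esearch returns matching PMIDs, then efetch resolves them to full MEDLINE records. A standalone sketch of that flow, under the assumption that the `requests` library is available (the engine itself goes through searx.poolrequests):

    # Sketch of the esearch -> efetch round trip implemented by the
    # pubmed engine above; `requests` is an assumption of this sketch.
    import requests
    from lxml import etree

    ESEARCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
    EFETCH = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi'

    def pubmed_search(term, hits=10):
        # Step 1: esearch returns the matching PMIDs as XML.
        ids_xml = requests.get(ESEARCH, params={
            'db': 'pubmed', 'term': term, 'retstart': 0, 'retmax': hits,
        }).content
        pmids = [e.text for e in etree.XML(ids_xml).xpath('//eSearchResult/IdList/Id')]
        if not pmids:
            return []
        # Step 2: efetch resolves the PMIDs to full MEDLINE records.
        records_xml = requests.get(EFETCH, params={
            'db': 'pubmed', 'retmode': 'xml', 'id': ','.join(pmids),
        }).content
        citations = etree.XML(records_xml).xpath(
            '//PubmedArticleSet/PubmedArticle/MedlineCitation')
        return [(c.xpath('.//PMID')[0].text,
                 c.xpath('.//Article/ArticleTitle')[0].text) for c in citations]

    # e.g. pubmed_search('aspirin') -> [('12345678', 'Some article title'), ...]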