| field | value | date |
|---|---|---|
| author | Adam Tauber <asciimoo@gmail.com> | 2014-10-19 12:06:34 +0200 |
| committer | Adam Tauber <asciimoo@gmail.com> | 2014-10-19 12:06:34 +0200 |
| commit | 20400c40c34b6122621476c46460c5a3a8624c89 (patch) | |
| tree | aa598d3d8b09f489b8d96a1821e7a560b8019672 /searx/webapp.py | |
| parent | 840945f498cd07d38cb198cc0735b6445f44802c (diff) | |
| parent | 1e3e6465845236b027ce8df9ab5fcc78ffe3f1d1 (diff) | |
Merge pull request #97 from pointhi/https
Implementing https rewrite support
Diffstat (limited to 'searx/webapp.py')
-rw-r--r-- | searx/webapp.py | 57 |
1 file changed, 52 insertions, 5 deletions
```diff
diff --git a/searx/webapp.py b/searx/webapp.py
index 18dc89a39..830cf440a 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -50,6 +50,9 @@
 from searx.search import Search
 from searx.query import Query
 from searx.autocomplete import backends as autocomplete_backends
 
+from urlparse import urlparse
+import re
+
 static_path, templates_path, themes =\
     get_themes(settings['themes_path']
@@ -206,16 +209,60 @@ def index():
         if not search.paging and engines[result['engine']].paging:
             search.paging = True
 
+        # check if HTTPS rewrite is required
         if settings['server']['https_rewrite']\
            and result['parsed_url'].scheme == 'http':
 
-            for http_regex, https_url in https_rules:
-                if http_regex.match(result['url']):
-                    result['url'] = http_regex.sub(https_url, result['url'])
-                    # TODO result['parsed_url'].scheme
+            skip_https_rewrite = False
+
+            # check if HTTPS rewrite is possible
+            for target, rules, exclusions in https_rules:
+
+                # check if target regex match with url
+                if target.match(result['url']):
+                    # process exclusions
+                    for exclusion in exclusions:
+                        # check if exclusion match with url
+                        if exclusion.match(result['url']):
+                            skip_https_rewrite = True
+                            break
+
+                    # skip https rewrite if required
+                    if skip_https_rewrite:
+                        break
+
+                    # process rules
+                    for rule in rules:
+                        try:
+                            # TODO, precompile rule
+                            p = re.compile(rule[0])
+
+                            # rewrite url if possible
+                            new_result_url = p.sub(rule[1], result['url'])
+                        except:
+                            break
+
+                        # parse new url
+                        new_parsed_url = urlparse(new_result_url)
+
+                        # continiue if nothing was rewritten
+                        if result['url'] == new_result_url:
+                            continue
+
+                        # get domainname from result
+                        # TODO, does only work correct with TLD's like asdf.com, not for asdf.com.de
+                        # TODO, using publicsuffix instead of this rewrite rule
+                        old_result_domainname = '.'.join(result['parsed_url'].hostname.split('.')[-2:])
+                        new_result_domainname = '.'.join(new_parsed_url.hostname.split('.')[-2:])
+
+                        # check if rewritten hostname is the same, to protect against wrong or malicious rewrite rules
+                        if old_result_domainname == new_result_domainname:
+                            # set new url
+                            result['url'] = new_result_url
+
+                    # target has matched, do not search over the other rules
                     break
 
-        # HTTPS rewrite
         if search.request_data.get('format', 'html') == 'html':
             if 'content' in result:
                 result['content'] = highlight_content(result['content'],
```
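For reference, below is a minimal standalone sketch of the rewrite logic this hunk adds: match a target pattern, bail out on exclusions, apply substitution rules, and accept the result only if the approximate domain is unchanged. It assumes the same `(target, rules, exclusions)` tuple layout as `https_rules`, but the function names `rewrite_to_https` and `second_level_domain` are illustrative only and not part of searx, and it uses Python 3's `urllib.parse` instead of the Python 2 `urlparse` module seen in the diff.

```python
import re
from urllib.parse import urlparse


def second_level_domain(hostname):
    # keep the last two labels, same heuristic as the diff
    # (as the TODOs above note, this is wrong for suffixes like .com.de;
    # a public-suffix list would be more accurate)
    return '.'.join(hostname.split('.')[-2:])


def rewrite_to_https(url, https_rules):
    """Return an https:// version of url if a rule safely applies, else url."""
    parsed = urlparse(url)
    if parsed.scheme != 'http' or not parsed.hostname:
        return url

    for target, rules, exclusions in https_rules:
        if not target.match(url):
            continue

        # any matching exclusion disables rewriting for this URL
        if any(exclusion.match(url) for exclusion in exclusions):
            return url

        for pattern, replacement in rules:
            new_url = re.sub(pattern, replacement, url)
            if new_url == url:
                continue

            new_host = urlparse(new_url).hostname
            # accept the rewrite only if the approximate domain is unchanged,
            # protecting against wrong or malicious rules
            if new_host and (second_level_domain(new_host)
                             == second_level_domain(parsed.hostname)):
                return new_url

        # the target matched: do not try the remaining rule sets
        return url

    return url


# hypothetical rule set for illustration (not a real HTTPS Everywhere rule)
example_rules = [(
    re.compile(r'^http://(www\.)?example\.com/'),                       # target
    [(r'^http://(www\.)?example\.com/', r'https://\g<1>example.com/')],  # rules
    [],                                                                   # exclusions
)]

print(rewrite_to_https('http://www.example.com/page', example_rules))
# -> https://www.example.com/page
```

Unlike the patched `index()`, the sketch returns the rewritten URL instead of mutating a result dict, which keeps the domain-preservation check easy to test in isolation.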