From b3ab221b9808ba2b7b01d417210af9b9527e661c Mon Sep 17 00:00:00 2001
From: stepshal
Date: Mon, 11 Jul 2016 20:29:47 +0700
Subject: Fix anomalous backslash in string

---
 searx/engines/currency_convert.py |  2 +-
 searx/engines/deviantart.py       |  2 +-
 searx/engines/google.py           |  4 ++--
 searx/engines/startpage.py        | 10 +++++-----
 searx/engines/swisscows.py        |  8 ++++----
 searx/engines/tokyotoshokan.py    |  2 +-
 searx/engines/www500px.py         |  2 +-
 searx/engines/yahoo_news.py       |  2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)

(limited to 'searx/engines')

diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index b0ffb490a..bc839cfb5 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -9,7 +9,7 @@
 categories = []
 url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
 weight = 100
 
-parser_re = re.compile(u'.*?(\d+(?:\.\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I)  # noqa
+parser_re = re.compile(u'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I)  # noqa
 
 db = 1
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index 135aeb324..c7816b9bc 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -47,7 +47,7 @@ def response(resp):
 
     dom = html.fromstring(resp.text)
 
-    regex = re.compile('\/200H\/')
+    regex = re.compile(r'\/200H\/')
 
     # parse results
     for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 6018ad1b2..fd5e7b54c 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -300,9 +300,9 @@ def parse_map_detail(parsed_url, result, google_hostname):
     results = []
 
     # try to parse the geoloc
-    m = re.search('@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
+    m = re.search(r'@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
     if m is None:
-        m = re.search('ll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
+        m = re.search(r'll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
 
     if m is not None:
         # geoloc found (ignored)
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 52dd0b92f..d8b702c4d 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -68,15 +68,15 @@ def response(resp):
         url = link.attrib.get('href')
 
         # block google-ad url's
-        if re.match("^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
+        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
             continue
 
         # block startpage search url's
-        if re.match("^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
+        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
             continue
 
         # block ixquick search url's
-        if re.match("^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
+        if re.match(r"^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
             continue
 
         title = escape(extract_text(link))
@@ -89,7 +89,7 @@
         published_date = None
 
         # check if search result starts with something like: "2 Sep 2014 ... "
-        if re.match("^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
+        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
             date_pos = content.find('...') + 4
             date_string = content[0:date_pos - 5]
             published_date = parser.parse(date_string, dayfirst=True)
@@ -98,7 +98,7 @@
             content = content[date_pos:]
 
         # check if search result starts with something like: "5 days ago ... "
-        elif re.match("^[0-9]+ days? ago \.\.\. ", content):
+        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
             date_pos = content.find('...') + 4
             date_string = content[0:date_pos - 5]
diff --git a/searx/engines/swisscows.py b/searx/engines/swisscows.py
index 864436a52..1a94ed64e 100644
--- a/searx/engines/swisscows.py
+++ b/searx/engines/swisscows.py
@@ -25,10 +25,10 @@
 base_url = 'https://swisscows.ch/'
 search_string = '?{query}&page={page}'
 
 # regex
-regex_json = re.compile('initialData: {"Request":(.|\n)*},\s*environment')
-regex_json_remove_start = re.compile('^initialData:\s*')
-regex_json_remove_end = re.compile(',\s*environment$')
-regex_img_url_remove_start = re.compile('^https?://i\.swisscows\.ch/\?link=')
+regex_json = re.compile(r'initialData: {"Request":(.|\n)*},\s*environment')
+regex_json_remove_start = re.compile(r'^initialData:\s*')
+regex_json_remove_end = re.compile(r',\s*environment$')
+regex_img_url_remove_start = re.compile(r'^https?://i\.swisscows\.ch/\?link=')
 
 # do search-request
diff --git a/searx/engines/tokyotoshokan.py b/searx/engines/tokyotoshokan.py
index 17e8e2191..e2990e153 100644
--- a/searx/engines/tokyotoshokan.py
+++ b/searx/engines/tokyotoshokan.py
@@ -48,7 +48,7 @@ def response(resp):
         return []
 
     # regular expression for parsing torrent size strings
-    size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
+    size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
 
     # processing the results, two rows at a time
     for i in xrange(0, len(rows), 2):
diff --git a/searx/engines/www500px.py b/searx/engines/www500px.py
index c98e19443..f1bc6c583 100644
--- a/searx/engines/www500px.py
+++ b/searx/engines/www500px.py
@@ -41,7 +41,7 @@ def response(resp):
     results = []
 
     dom = html.fromstring(resp.text)
-    regex = re.compile('3\.jpg.*$')
+    regex = re.compile(r'3\.jpg.*$')
 
     # parse results
     for result in dom.xpath('//div[@class="photo"]'):
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index d4cfbeda2..e91c1d34e 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -55,7 +55,7 @@ def request(query, params):
 
 
 def sanitize_url(url):
    if ".yahoo.com/" in url:
-        return re.sub(u"\;\_ylt\=.+$", "", url)
+        return re.sub(u"\\;\\_ylt\\=.+$", "", url)
     else:
         return url
--
cgit v1.2.3-54-g00ecf
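
Note on the motivation: every hunk above addresses the same lint finding, pylint's
`anomalous-backslash-in-string` (W1401). In an ordinary Python string literal an
unrecognized escape such as `\d` or `\s` happens to keep its backslash, so these
regexes worked, but the behavior is accidental, and newer Python 3 releases warn
about such escapes. The patch uses the two standard fixes: prefix the literal with
`r` so backslashes pass through untouched, or double each backslash. A minimal
standalone sketch of the equivalence (illustrative example, not code from searx):

```python
import re

# Doubling the backslash spells out what the plain literal '\s' or '\d' only
# produced by accident of the lexer:
explicit = re.compile('Size:\\s*([\\d.]+)')

# A raw string passes every backslash through to the re module untouched,
# which is why most lines in this patch simply gain an r prefix:
raw = re.compile(r'Size:\s*([\d.]+)')

# Both literals denote the same pattern and match the same text.
assert explicit.pattern == raw.pattern
assert explicit.match('Size: 1.4').group(1) == '1.4'
assert raw.match('Size: 1.4').group(1) == '1.4'
```

The raw-string form is generally preferred for regexes because the pattern reads
exactly as the re module sees it. The doubled-backslash form appears here only in
the two `u"..."` literals, likely because the combined `ur"..."` prefix is a syntax
error on Python 3, which searx supported alongside Python 2 at the time.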