author    Dalf <alex@al-f.net>  2019-04-12 02:42:47 +0200
committer Dalf <alex@al-f.net>  2019-05-28 04:06:35 +0200
commit    ffe0972f91ca8e488ffd8bd9926c745f24507d5b (patch)
tree      759721fc40b7ca21d1c1256eca2f0dbc1bdc0980 /searx/engines
parent    6c95ebcff5cbf3b154969648012dd1ac7678583b (diff)
Remove some engines: subtitleseeker, seedpeer, swisscows

http://www.subtitleseeker.com and http://www.seedpeer.eu don't exist anymore. https://swisscows.ch/ has changed: the engine needs to be updated.
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/seedpeer.py        |  75 -
-rw-r--r--  searx/engines/subtitleseeker.py  |  86 -
-rw-r--r--  searx/engines/swisscows.py       | 125 -
3 files changed, 0 insertions(+), 286 deletions(-)
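
Note: each of the three deleted modules implements the same two-function contract every searx engine exposes: request(query, params) fills in params['url'] for the outgoing search, and response(resp) parses the HTTP reply into result dicts. A minimal sketch of that shape, using a placeholder 'example.com' backend that is not part of this commit:

    from searx.url_utils import urlencode

    base_url = 'https://example.com/'

    def request(query, params):
        # fill in the URL searx should fetch for this query
        params['url'] = base_url + 'search?' + urlencode({'q': query})
        return params

    def response(resp):
        # parse resp.text here and return a list of result dicts
        results = []
        results.append({'url': 'https://example.com/hit',
                        'title': 'example title',
                        'content': 'example snippet'})
        return results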
diff --git a/searx/engines/seedpeer.py b/searx/engines/seedpeer.py
deleted file mode 100644
index 3770dacac..000000000
--- a/searx/engines/seedpeer.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Seedpeer (Videos, Music, Files)
-#
-# @website http://seedpeer.eu
-# @provide-api no (nothing found)
-#
-# @using-api no
-# @results HTML (using search portal)
-# @stable yes (HTML can change)
-# @parse url, title, content, seed, leech, magnetlink
-
-from lxml import html
-from operator import itemgetter
-from searx.url_utils import quote, urljoin
-
-
-url = 'http://www.seedpeer.eu/'
-search_url = url + 'search/{search_term}/7/{page_no}.html'
-# specific xpath variables
-torrent_xpath = '//*[@id="body"]/center/center/table[2]/tr/td/a'
-alternative_torrent_xpath = '//*[@id="body"]/center/center/table[1]/tr/td/a'
-title_xpath = '//*[@id="body"]/center/center/table[2]/tr/td/a/text()'
-alternative_title_xpath = '//*[@id="body"]/center/center/table/tr/td/a'
-seeds_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[4]/font/text()'
-alternative_seeds_xpath = '//*[@id="body"]/center/center/table/tr/td[4]/font/text()'
-peers_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[5]/font/text()'
-alternative_peers_xpath = '//*[@id="body"]/center/center/table/tr/td[5]/font/text()'
-age_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[2]/text()'
-alternative_age_xpath = '//*[@id="body"]/center/center/table/tr/td[2]/text()'
-size_xpath = '//*[@id="body"]/center/center/table[2]/tr/td[3]/text()'
-alternative_size_xpath = '//*[@id="body"]/center/center/table/tr/td[3]/text()'
-
-
-# do search-request
-def request(query, params):
-    params['url'] = search_url.format(search_term=quote(query),
-                                      page_no=params['pageno'] - 1)
-    return params
-
-
-# get response from search-request
-def response(resp):
-    results = []
-    dom = html.fromstring(resp.text)
-    torrent_links = dom.xpath(torrent_xpath)
-    if len(torrent_links) > 0:
-        seeds = dom.xpath(seeds_xpath)
-        peers = dom.xpath(peers_xpath)
-        titles = dom.xpath(title_xpath)
-        sizes = dom.xpath(size_xpath)
-        ages = dom.xpath(age_xpath)
-    else:  # under ~5 results uses a different xpath
-        torrent_links = dom.xpath(alternative_torrent_xpath)
-        seeds = dom.xpath(alternative_seeds_xpath)
-        peers = dom.xpath(alternative_peers_xpath)
-        titles = dom.xpath(alternative_title_xpath)
-        sizes = dom.xpath(alternative_size_xpath)
-        ages = dom.xpath(alternative_age_xpath)
-    # return empty array if nothing is found
-    if not torrent_links:
-        return []
-
-    # parse results
-    for index, result in enumerate(torrent_links):
-        link = result.attrib.get('href')
-        href = urljoin(url, link)
-        results.append({'url': href,
-                        'title': titles[index].text_content(),
-                        'content': '{}, {}'.format(sizes[index], ages[index]),
-                        'seed': seeds[index],
-                        'leech': peers[index],
-                        'template': 'torrent.html'})
-
-    # return results sorted by seeder
-    return sorted(results, key=itemgetter('seed'), reverse=True)
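
Review note on the deleted sort: the seed and leech values come straight from xpath text nodes, so sorted(results, key=itemgetter('seed')) compares strings and orders '9' above '10'. Had the engine been kept, a fix along these lines would have been needed (the helper name is hypothetical):

    def _seed_count(result):
        # hypothetical helper: cast the scraped seeder string to an int
        try:
            return int(result['seed'])
        except (ValueError, TypeError):
            return 0  # unparsable counts sort last

    results.sort(key=_seed_count, reverse=True)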
diff --git a/searx/engines/subtitleseeker.py b/searx/engines/subtitleseeker.py
deleted file mode 100644
index 2cbc991b3..000000000
--- a/searx/engines/subtitleseeker.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""
- Subtitleseeker (Video)
-
- @website http://www.subtitleseeker.com
- @provide-api no
-
- @using-api no
- @results HTML
- @stable no (HTML can change)
- @parse url, title, content
-"""
-
-from lxml import html
-from searx.languages import language_codes
-from searx.engines.xpath import extract_text
-from searx.url_utils import quote_plus
-
-# engine dependent config
-categories = ['videos']
-paging = True
-language = ""
-
-# search-url
-url = 'http://www.subtitleseeker.com/'
-search_url = url + 'search/TITLES/{query}?p={pageno}'
-
-# specific xpath variables
-results_xpath = '//div[@class="boxRows"]'
-
-
-# do search-request
-def request(query, params):
-    params['url'] = search_url.format(query=quote_plus(query),
-                                      pageno=params['pageno'])
-    return params
-
-
-# get response from search-request
-def response(resp):
-    results = []
-
-    dom = html.fromstring(resp.text)
-
-    search_lang = ""
-
-    # dirty fix for languages named differently on their site
-    if resp.search_params['language'][:2] == 'fa':
-        search_lang = 'Farsi'
-    elif resp.search_params['language'] == 'pt-BR':
-        search_lang = 'Brazilian'
-    elif resp.search_params['language'] != 'all':
-        search_lang = [lc[3]
-                       for lc in language_codes
-                       if lc[0].split('-')[0] == resp.search_params['language'].split('-')[0]]
-        search_lang = search_lang[0].split(' (')[0]
-
-    # parse results
-    for result in dom.xpath(results_xpath):
-        link = result.xpath(".//a")[0]
-        href = link.attrib.get('href')
-
-        if language is not "":
-            href = href + language + '/'
-        elif search_lang:
-            href = href + search_lang + '/'
-
-        title = extract_text(link)
-
-        content = extract_text(result.xpath('.//div[contains(@class,"red")]'))
-        content = content + " - "
-        text = extract_text(result.xpath('.//div[contains(@class,"grey-web")]')[0])
-        content = content + text
-
-        if result.xpath(".//span") != []:
-            content = content +\
-                " - (" +\
-                extract_text(result.xpath(".//span")) +\
-                ")"
-
-        # append result
-        results.append({'url': href,
-                        'title': title,
-                        'content': content})
-
-    # return results
-    return results
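
Review note: the deleted `if language is not "":` tests object identity rather than equality. It happens to behave as intended only because CPython interns the empty string; the comparison is not guaranteed by the language and triggers a SyntaxWarning on newer Pythons. The equality form it should have used:

    if language != "":
        href = href + language + '/'
    elif search_lang:
        href = href + search_lang + '/'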
diff --git a/searx/engines/swisscows.py b/searx/engines/swisscows.py
deleted file mode 100644
index e451bcffc..000000000
--- a/searx/engines/swisscows.py
+++ /dev/null
@@ -1,125 +0,0 @@
-"""
- Swisscows (Web, Images)
-
- @website https://swisscows.ch
- @provide-api no
-
- @using-api no
- @results HTML (using search portal)
- @stable no (HTML can change)
- @parse url, title, content
-"""
-
-from json import loads
-import re
-from lxml.html import fromstring
-from searx.url_utils import unquote, urlencode
-from searx.utils import match_language
-
-# engine dependent config
-categories = ['general', 'images']
-paging = True
-language_support = True
-
-# search-url
-base_url = 'https://swisscows.ch/'
-search_string = '?{query}&page={page}'
-
-supported_languages_url = base_url
-
-# regex
-regex_json = re.compile(b'initialData: {"Request":(.|\n)*},\s*environment')
-regex_json_remove_start = re.compile(b'^initialData:\s*')
-regex_json_remove_end = re.compile(b',\s*environment$')
-regex_img_url_remove_start = re.compile(b'^https?://i\.swisscows\.ch/\?link=')
-
-
-# do search-request
-def request(query, params):
-    if params['language'] == 'all':
-        ui_language = 'browser'
-        region = 'browser'
-    else:
-        region = match_language(params['language'], supported_languages, language_aliases)
-        ui_language = region.split('-')[0]
-
-    search_path = search_string.format(
-        query=urlencode({'query': query, 'uiLanguage': ui_language, 'region': region}),
-        page=params['pageno']
-    )
-
-    # image search query is something like 'image?{query}&page={page}'
-    if params['category'] == 'images':
-        search_path = 'image' + search_path
-
-    params['url'] = base_url + search_path
-
-    return params
-
-
-# get response from search-request
-def response(resp):
-    results = []
-
-    json_regex = regex_json.search(resp.text)
-
-    # check if results are returned
-    if not json_regex:
-        return []
-
-    json_raw = regex_json_remove_end.sub(b'', regex_json_remove_start.sub(b'', json_regex.group()))
-    json = loads(json_raw.decode('utf-8'))
-
-    # parse results
-    for result in json['Results'].get('items', []):
-        result_title = result['Title'].replace(u'\uE000', '').replace(u'\uE001', '')
-
-        # parse image results
-        if result.get('ContentType', '').startswith('image'):
-            img_url = unquote(regex_img_url_remove_start.sub(b'', result['Url'].encode('utf-8')).decode('utf-8'))
-
-            # append result
-            results.append({'url': result['SourceUrl'],
-                            'title': result['Title'],
-                            'content': '',
-                            'img_src': img_url,
-                            'template': 'images.html'})
-
-        # parse general results
-        else:
-            result_url = result['Url'].replace(u'\uE000', '').replace(u'\uE001', '')
-            result_content = result['Description'].replace(u'\uE000', '').replace(u'\uE001', '')
-
-            # append result
-            results.append({'url': result_url,
-                            'title': result_title,
-                            'content': result_content})
-
-    # parse images
-    for result in json.get('Images', []):
-        # decode image url
-        img_url = unquote(regex_img_url_remove_start.sub(b'', result['Url'].encode('utf-8')).decode('utf-8'))
-
-        # append result
-        results.append({'url': result['SourceUrl'],
-                        'title': result['Title'],
-                        'content': '',
-                        'img_src': img_url,
-                        'template': 'images.html'})
-
-    # return results
-    return results
-
-
-# get supported languages from their site
-def _fetch_supported_languages(resp):
-    supported_languages = []
-    dom = fromstring(resp.text)
-    options = dom.xpath('//div[@id="regions-popup"]//ul/li/a')
-    for option in options:
-        code = option.xpath('./@data-search-language')[0]
-        if code.startswith('nb-'):
-            code = code.replace('nb', 'no', 1)
-        supported_languages.append(code)
-
-    return supported_languages
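
Note: the core trick in the deleted swisscows engine is scraping a JSON blob embedded in the page's JavaScript (initialData: {...}, environment) instead of parsing rendered HTML. A standalone sketch of that pattern, using illustrative sample markup rather than real swisscows output:

    import re
    from json import loads

    # illustrative stand-in for a page that inlines its data as JavaScript
    page = b'var x = { initialData: {"Request": {"Query": "test"}},  environment: {} };'

    # capture only the JSON object between 'initialData:' and ', environment'
    match = re.search(br'initialData: ({"Request":(?:.|\n)*?}),\s*environment', page)
    if match:
        data = loads(match.group(1).decode('utf-8'))
        print(data['Request']['Query'])  # -> test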