author     asciimoo <asciimoo@gmail.com>  2014-02-11 13:13:51 +0100
committer  asciimoo <asciimoo@gmail.com>  2014-02-11 13:13:51 +0100
commit     c1d7d30b8ec2950a6338f0b99ebe9bdc094fdb73 (patch)
tree       bc2a38ccacc7d94721ab4b3942d83617ef3662f6 /searx/engines
parent     239299d45ec7698e45451b617f2ef52bfb2c2e88 (diff)
download   searxng-c1d7d30b8ec2950a6338f0b99ebe9bdc094fdb73.tar.gz
           searxng-c1d7d30b8ec2950a6338f0b99ebe9bdc094fdb73.zip
[mod] len() removed from conditions
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/__init__.py     |  2
-rw-r--r--  searx/engines/json_engine.py  |  2
-rw-r--r--  searx/engines/startpage.py    |  2
-rw-r--r--  searx/engines/xpath.py        |  2
-rw-r--r--  searx/engines/yacy.py         |  6
-rw-r--r--  searx/engines/youtube.py      | 10
6 files changed, 13 insertions, 11 deletions
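
The commit applies the standard Python idiom its title describes: empty strings, lists, and dicts are falsy, so "if not seq:" can replace "if not len(seq):". A minimal sketch of the pattern, using hypothetical names not taken from searx:

    # Truthiness instead of len() in conditions (illustrative only).
    def first_words(lines):
        words = []
        if not lines:                # was: if not len(lines):
            return words
        for line in lines:
            if line:                 # was: if len(line):
                words.append(line.split()[0])
        return words

    print(first_words(['foo bar', '', 'baz']))  # ['foo', 'baz']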
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 358548247..b2750d0c2 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -163,7 +163,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
+            if res.get('content') > duplicated.get('content'):
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
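
Unlike the hunks below, this one is not a pure truthiness rewrite: dropping len() here turns a length comparison into a lexicographic string comparison, and the two conditions can disagree. A quick illustration with hypothetical values:

    res_content = 'zz'      # shorter, but lexicographically larger
    dup_content = 'aaaa'    # longer, but lexicographically smaller
    print(len(res_content) > len(dup_content))  # False (old condition)
    print(res_content > dup_content)            # True  (new condition)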
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index e7cc808bb..708b999f8 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -39,7 +39,7 @@ def parse(query):
 def do_query(data, q):
     ret = []
-    if not len(q):
+    if not q:
         return ret
     qkey = q[0]
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index fcfc21160..cbafe0976 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -35,7 +35,7 @@ def response(resp):
         title = link.text_content()
         content = ''
-        if len(result.xpath('./p[@class="desc"]')):
+        if result.xpath('./p[@class="desc"]'):
             content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 04b021e33..9af24de3b 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -23,7 +23,7 @@ if xpath_results is a string element, then it's already done
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
-        if not len(xpath_results):
+        if not xpath_results:
             raise Exception('Empty url resultset')
         result = ''
         for e in xpath_results:
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index a4a41ac3b..efdf846ac 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -13,7 +13,7 @@ def request(query, params):
 def response(resp):
     raw_search_results = loads(resp.text)
-    if not len(raw_search_results):
+    if not raw_search_results:
         return []
     search_results = raw_search_results.get('channels', {})[0].get('items', [])
@@ -26,10 +26,10 @@ def response(resp):
         tmp_result['url'] = result['link']
         tmp_result['content'] = ''
-        if len(result['description']):
+        if result['description']:
             tmp_result['content'] += result['description'] + "<br/>"
-        if len(result['pubDate']):
+        if result['pubDate']:
             tmp_result['content'] += result['pubDate'] + "<br/>"
         if result['size'] != '-1':
diff --git a/searx/engines/youtube.py b/searx/engines/youtube.py
index 6e4e8859a..5b04f3513 100644
--- a/searx/engines/youtube.py
+++ b/searx/engines/youtube.py
@@ -22,9 +22,10 @@ def response(resp):
     if not 'feed' in search_results:
         return results
     feed = search_results['feed']
+
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-        if not len(url):
+        if not url:
             return
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
@@ -32,12 +33,13 @@ def response(resp):
         url = url[:-1]
         title = result['title']['$t']
         content = ''
-
         thumbnail = ''
-        if len(result['media$group']['media$thumbnail']):
+
+        if result['media$group']['media$thumbnail']:
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
-        if len(content):
+
+        if content:
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']