path: root/searx/engines/bing_news.py
author     Cqoicebordel <Cqoicebordel@users.noreply.github.com>  2015-01-29 20:56:57 +0100
committer  Cqoicebordel <Cqoicebordel@users.noreply.github.com>  2015-01-29 20:56:57 +0100
commit     efde2c21c8656ad21b24980b516ddbbf2e209523 (patch)
tree       2c06eb69c5b8c150810b4366df7598b3ebdad264 /searx/engines/bing_news.py
parent     dad0434f34f04ada2b4b0961bbb714e25c752677 (diff)
Bing news' unit test
I have no idea why coverage reports 97% with 2 missed branches. If anyone has an idea...
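
The test module this commit adds is not visible here, since the view below is limited to searx/engines/bing_news.py. On the coverage question: one plausible, unverified explanation for the two missed branches is that lxml's result.xpath() always returns a list and never None, so the two "if ... is not None:" checks in response() can never take their false branch and show up as partially covered. For orientation, a minimal sketch of a mock-based engine test in the style searx usually uses (SearxTestCase plus mock); the module path, fixture HTML and assertions are illustrative assumptions, not the committed test:

# Illustrative sketch only: the actual test file added by this commit is not
# part of this diff. Fixture HTML and assertions are assumptions.
from collections import defaultdict
import mock  # standalone 'mock' package (Python 2 era); unittest.mock on Python 3
from searx.engines import bing_news
from searx.testing import SearxTestCase


class TestBingNewsEngine(SearxTestCase):

    def test_request(self):
        params = defaultdict(dict)
        params['pageno'] = 1
        params['language'] = 'fr_FR'
        params = bing_news.request('test_query', params)
        self.assertIn('url', params)
        self.assertIn('test_query', params['url'])
        self.assertIn('bing.com', params['url'])

    def test_response(self):
        # A page with no sn_r blocks should yield an empty result list
        response = mock.Mock(content='<html></html>')
        self.assertEqual(bing_news.response(response), [])

        # One hand-written result block mimicking Bing News markup
        html_fixture = """
        <div class="sn_r">
          <div class="newstitle">
            <a href="http://example.org/article">Example title</a>
          </div>
          <div class="sn_txt">
            <div>
              <span class="sn_snip">Example snippet</span>
              <span class="sn_ST"><span class="sn_tm">2 hours ago</span></span>
            </div>
          </div>
        </div>
        """
        response = mock.Mock(content=html_fixture)
        results = bing_news.response(response)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['url'], 'http://example.org/article')
        self.assertEqual(results[0]['title'], 'Example title')
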
Diffstat (limited to 'searx/engines/bing_news.py')
-rw-r--r--  searx/engines/bing_news.py  29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 789a23b89..182bd36b5 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -15,6 +15,7 @@ from lxml import html
 from datetime import datetime, timedelta
 from dateutil import parser
 import re
+from searx.engines.xpath import extract_text
 
 # engine dependent config
 categories = ['news']
@@ -42,6 +43,7 @@ def request(query, params):
     params['cookies']['_FP'] = "ui=en-US"
 
     params['url'] = base_url + search_path
+
     return params
 
 
@@ -55,44 +57,37 @@ def response(resp):
     for result in dom.xpath('//div[@class="sn_r"]'):
         link = result.xpath('.//div[@class="newstitle"]/a')[0]
         url = link.attrib.get('href')
-        title = ' '.join(link.xpath('.//text()'))
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
-                                    '//span[@class="sn_snip"]//text()')
+        title = extract_text(link)
+        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]')
         if contentXPath is not None:
-            content = escape(' '.join(contentXPath))
+            content = escape(extract_text(contentXPath))
 
         # parse publishedDate
         publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
                                           '//span[contains(@class,"sn_ST")]'
-                                          '//span[contains(@class,"sn_tm")]'
-                                          '//text()')
+                                          '//span[contains(@class,"sn_tm")]')
+
         if publishedDateXPath is not None:
-            publishedDate = escape(' '.join(publishedDateXPath))
+            publishedDate = escape(extract_text(publishedDateXPath))
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now()\
-                - timedelta(minutes=int(timeNumbers[0]))
+            publishedDate = datetime.now() - timedelta(minutes=int(timeNumbers[0]))
         elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now()\
-                - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|),"
-                      " [0-9]+ minute(s|) ago$", publishedDate):
+            publishedDate = datetime.now() - timedelta(hours=int(timeNumbers[0]))
+        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
             publishedDate = datetime.now()\
                 - timedelta(hours=int(timeNumbers[0]))\
                 - timedelta(minutes=int(timeNumbers[1]))
         elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
             timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now()\
-                - timedelta(days=int(timeNumbers[0]))
+            publishedDate = datetime.now() - timedelta(days=int(timeNumbers[0]))
         else:
             try:
-                # FIXME use params['language'] to parse either mm/dd or dd/mm
                 publishedDate = parser.parse(publishedDate, dayfirst=False)
             except TypeError:
-                # FIXME
                 publishedDate = datetime.now()
 
         # append result
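
The substantive change above is swapping manual '//text()' harvesting and ' '.join(...) for the shared extract_text() helper from searx.engines.xpath, which takes an lxml element or a list of xpath results and returns its flattened text. A rough sketch of the difference, using a made-up markup snippet (extract_text's exact whitespace handling is assumed here, not verified):

# Rough sketch of the refactor's effect; not searx internals verbatim.
from lxml import html
from searx.engines.xpath import extract_text

dom = html.fromstring(
    '<div class="newstitle"><a href="#">Breaking <b>news</b> title</a></div>')
link = dom.xpath('.//a')[0]

# Old approach: collect every text() node and join them, which can introduce
# stray whitespace around inline tags such as <b>.
title_old = ' '.join(link.xpath('.//text()'))

# New approach: hand the element to the shared helper, which flattens its
# text content (and also accepts a list of xpath results, as used for the
# snippet and date spans in the diff above).
title_new = extract_text(link)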