author     Markus Heiser <markus.heiser@darmarit.de>   2021-12-27 09:26:22 +0100
committer  Markus Heiser <markus.heiser@darmarit.de>   2021-12-27 09:26:22 +0100
commit     3d96a9839a12649874b6d4cf9466bd3616b0a03c (patch)
tree       e7d54d1e345b1e792d538ddc250f4827bb2fd9b9 /searx/utils.py
parent     fcdc2c2cd26e24c2aa3f064d93cee3e29dc2a30c (diff)
[format.python] initial formatting of the python code
This patch was generated by black [1]::

    make format.python

[1] https://github.com/psf/black

Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Diffstat (limited to 'searx/utils.py')
-rw-r--r--  searx/utils.py  22
1 file changed, 9 insertions(+), 13 deletions(-)
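The commit message notes that this diff was produced by running black through make format.python. As a rough illustration only, the same kind of reformatting can be reproduced with black's Python API; the line-length and string-normalization settings below are assumptions about the project's black configuration, not values taken from this commit::

    # Sketch: reproduce the tuple reformatting seen in the first hunk of this diff.
    # black.format_str() and black.Mode() are part of black's public API; the
    # line_length and string_normalization values are assumed, not read from searxng.
    import black

    src = "blocked_tags = ('script',\n                'style')\n"
    mode = black.Mode(line_length=120, string_normalization=False)
    print(black.format_str(src, mode=mode))
    # prints: blocked_tags = ('script', 'style')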
diff --git a/searx/utils.py b/searx/utils.py
index 163892e93..d44bb73ea 100644
--- a/searx/utils.py
+++ b/searx/utils.py
@@ -23,8 +23,7 @@ from searx import logger
logger = logger.getChild('utils')
-blocked_tags = ('script',
- 'style')
+blocked_tags = ('script', 'style')
ecma_unescape4_re = re.compile(r'%u([0-9a-fA-F]{4})', re.UNICODE)
ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE)
@@ -43,8 +42,8 @@ NOTSET = NotSetClass()
def searx_useragent():
"""Return the searx User Agent"""
return 'searx/{searx_version} {suffix}'.format(
- searx_version=VERSION_TAG,
- suffix=settings['outgoing']['useragent_suffix']).strip()
+ searx_version=VERSION_TAG, suffix=settings['outgoing']['useragent_suffix']
+ ).strip()
def gen_useragent(os=None):
@@ -60,7 +59,6 @@ class HTMLTextExtractorException(Exception):
class HTMLTextExtractor(HTMLParser): # pylint: disable=W0223 # (see https://bugs.python.org/issue31844)
-
def __init__(self):
HTMLParser.__init__(self)
self.result = []
@@ -135,10 +133,10 @@ def html_to_text(html_str):
def extract_text(xpath_results, allow_none=False):
"""Extract text from a lxml result
- * if xpath_results is list, extract the text from each result and concat the list
- * if xpath_results is a xml element, extract all the text node from it
- ( text_content() method from lxml )
- * if xpath_results is a string element, then it's already done
+ * if xpath_results is list, extract the text from each result and concat the list
+ * if xpath_results is a xml element, extract all the text node from it
+ ( text_content() method from lxml )
+ * if xpath_results is a string element, then it's already done
"""
if isinstance(xpath_results, list):
# it's list of result : concat everything using recursive call
@@ -148,9 +146,7 @@ def extract_text(xpath_results, allow_none=False):
return result.strip()
elif isinstance(xpath_results, ElementBase):
# it's a element
- text = html.tostring(
- xpath_results, encoding='unicode', method='text', with_tail=False
- )
+ text = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
text = text.strip().replace('\n', ' ')
return ' '.join(text.split())
elif isinstance(xpath_results, (_ElementStringResult, _ElementUnicodeResult, str, Number, bool)):
@@ -344,7 +340,7 @@ def is_valid_lang(lang):
"""
if isinstance(lang, bytes):
lang = lang.decode()
- is_abbr = (len(lang) == 2)
+ is_abbr = len(lang) == 2
lang = lang.lower()
if is_abbr:
for l in language_codes: