2015-10-03 12:53:45 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2019-01-29 13:14:30 +01:00
|
|
|
# Copyright 2015-2019 Mike Fährmann
|
2015-10-03 12:53:45 +02:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License version 2 as
|
|
|
|
# published by the Free Software Foundation.
|
|
|
|
|
2019-01-29 13:14:30 +01:00
|
|
|
"""Collection of functions that work on strings/text"""
|
2015-10-03 12:53:45 +02:00
|
|
|
|
|
|
|
import re
|
2016-02-18 15:54:58 +01:00
|
|
|
import html
|
2018-04-20 14:53:21 +02:00
|
|
|
import os.path
|
2019-04-21 15:28:27 +02:00
|
|
|
import datetime
|
2015-10-03 12:53:45 +02:00
|
|
|
import urllib.parse
|
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2020-03-13 23:30:16 +01:00
|
|
|
# precompiled pattern matching a single HTML/XML tag ("<...>")
HTML_RE = re.compile("<[^>]+>")
|
|
|
|
|
2018-04-14 20:56:21 +02:00
|
|
|
# C0 control characters that are not valid in XML 1.0 documents
# (everything below 0x20 except TAB \x09, LF \x0a, and CR \x0d)
INVALID_XML_CHARS = (
    "\x00", "\x01", "\x02", "\x03", "\x04", "\x05", "\x06", "\x07",
    "\x08", "\x0b", "\x0c", "\x0e", "\x0f", "\x10", "\x11", "\x12",
    "\x13", "\x14", "\x15", "\x16", "\x17", "\x18", "\x19", "\x1a",
    "\x1b", "\x1c", "\x1d", "\x1e", "\x1f",
)
|
2017-03-14 09:09:04 +01:00
|
|
|
|
|
|
|
|
|
|
|
def clean_xml(xmldata, repl=""):
|
2018-04-14 20:56:21 +02:00
|
|
|
"""Replace/Remove invalid control characters in 'xmldata'"""
|
|
|
|
if not isinstance(xmldata, str):
|
|
|
|
try:
|
|
|
|
xmldata = "".join(xmldata)
|
|
|
|
except TypeError:
|
|
|
|
return ""
|
2017-03-14 09:09:04 +01:00
|
|
|
for char in INVALID_XML_CHARS:
|
|
|
|
if char in xmldata:
|
|
|
|
xmldata = xmldata.replace(char, repl)
|
|
|
|
return xmldata
|
|
|
|
|
|
|
|
|
2019-07-17 14:48:24 +02:00
|
|
|
def remove_html(txt, repl=" ", sep=" "):
    """Remove html-tags from a string

    Each tag is substituted with 'repl'; afterwards the result's
    whitespace is normalized by re-joining its words with 'sep'
    (or merely stripped when 'sep' is empty/falsy).
    Returns "" for non-string input.
    """
    try:
        stripped = HTML_RE.sub(repl, txt)
    except TypeError:
        return ""
    return sep.join(stripped.split()) if sep else stripped.strip()
|
2015-10-03 12:53:45 +02:00
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2018-05-27 15:00:41 +02:00
|
|
|
def split_html(txt, sep=None):
    """Split input string by html-tags

    Returns a list of the stripped, non-whitespace text fragments
    between tags; [] for non-string input.
    """
    try:
        fragments = HTML_RE.split(txt)
    except TypeError:
        return []
    return [
        fragment.strip()
        for fragment in fragments
        if fragment and not fragment.isspace()
    ]
|
|
|
|
|
|
|
|
|
2015-10-03 12:53:45 +02:00
|
|
|
def filename_from_url(url):
    """Extract the last part of an URL to use as a filename

    Returns "" when 'url' cannot be parsed.
    """
    try:
        # last path component, i.e. everything after the final "/"
        return urllib.parse.urlsplit(url).path.split("/")[-1]
    except (TypeError, AttributeError):
        return ""
|
2015-10-03 12:53:45 +02:00
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2019-01-31 12:23:25 +01:00
|
|
|
def ext_from_url(url):
    """Extract the filename extension of an URL

    Returns the lowercased extension without its leading dot,
    or "" when there is none.
    """
    extension = os.path.splitext(filename_from_url(url))[1]
    return extension[1:].lower()
|
|
|
|
|
|
|
|
|
2015-11-16 02:20:22 +01:00
|
|
|
def nameext_from_url(url, data=None):
    """Extract the last part of an URL and fill 'data' accordingly

    Sets data["filename"] to the unquoted base name and
    data["extension"] to its lowercased extension (without dot).
    A new dict is created when 'data' is None.
    """
    if data is None:
        data = {}
    data["filename"], extension = os.path.splitext(
        unquote(filename_from_url(url)))
    data["extension"] = extension[1:].lower()
    return data
|
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2015-10-03 12:53:45 +02:00
|
|
|
def extract(txt, begin, end, pos=0):
    """Extract the text between 'begin' and 'end' from 'txt'

    Args:
        txt: String to search in
        begin: First string to be searched for
        end: Second string to be searched for after 'begin'
        pos: Starting position for searches in 'txt'

    Returns:
        The substring found between 'begin' and 'end' (searching from
        'pos') together with the position right after 'end'.
        If either marker is missing, (None, <original pos>) is returned.

    Examples:
        extract("abcde", "b", "d")    -> "c" , 4
        extract("abcde", "b", "d", 3) -> None, 3
    """
    try:
        start = txt.index(begin, pos) + len(begin)
        stop = txt.index(end, start)
    except (ValueError, TypeError, AttributeError):
        return None, pos
    return txt[start:stop], stop + len(end)
|
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2019-05-28 21:03:41 +02:00
|
|
|
def rextract(txt, begin, end, pos=-1):
    """Like extract(), but search for 'begin' from the right

    Finds the last occurrence of 'begin' before 'pos' and the first
    'end' after it.

    Returns:
        The substring between the markers and the index of the found
        'begin' occurrence; (None, pos) when either marker is missing.
    """
    try:
        blen = len(begin)
        start = txt.rindex(begin, 0, pos)
        stop = txt.index(end, start + blen)
        return txt[start + blen:stop], start
    except (ValueError, TypeError, AttributeError):
        return None, pos
|
|
|
|
|
|
|
|
|
2015-11-03 00:05:18 +01:00
|
|
|
def extract_all(txt, rules, pos=0, values=None):
    """Calls extract() for each rule and returns the results in a dict

    Each rule is a (key, begin, end) triple; rules with a falsy key are
    consumed for positioning only and their result is discarded.
    Returns the result dict and the final search position.
    """
    if values is None:
        values = {}
    for name, begin, end in rules:
        match, pos = extract(txt, begin, end, pos)
        if name:
            values[name] = match
    return values, pos
|
2015-10-03 12:53:45 +02:00
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2015-11-28 01:46:34 +01:00
|
|
|
def extract_iter(txt, begin, end, pos=0):
    """Yield values that would be returned by repeated calls of extract()

    Iteration stops as soon as either marker can no longer be found.
    Like extract(), invalid input ('txt' without an index method, etc.)
    simply produces no values instead of raising.
    """
    try:
        # hoist lookups out of the loop; keeping the attribute access
        # inside the 'try' makes non-string 'txt' yield nothing instead
        # of raising AttributeError (consistent with extract())
        index = txt.index
        lbeg = len(begin)
        lend = len(end)
        while True:
            first = index(begin, pos) + lbeg
            last = index(end, first)
            pos = last + lend
            yield txt[first:last]
    except (ValueError, TypeError, AttributeError):
        return
|
2015-11-28 01:46:34 +01:00
|
|
|
|
2017-01-30 19:40:15 +01:00
|
|
|
|
2019-04-19 22:30:11 +02:00
|
|
|
def extract_from(txt, pos=0, default=""):
    """Returns a function object that extracts from 'txt'

    The returned callable behaves like extract(begin, end) bound to
    'txt', advancing an internal position after every successful call
    and returning 'default' on failure.
    """
    def extr(begin, end, index=txt.index, txt=txt):
        nonlocal pos
        try:
            start = index(begin, pos) + len(begin)
            stop = index(end, start)
        except (ValueError, TypeError, AttributeError):
            return default
        pos = stop + len(end)
        return txt[start:stop]
    return extr
|
|
|
|
|
|
|
|
|
2019-06-16 21:46:26 +02:00
|
|
|
def parse_unicode_escapes(txt):
    """Convert JSON Unicode escapes in 'txt' into actual characters"""
    # fast path: skip the regex entirely when no escape is present
    if "\\u" not in txt:
        return txt
    return re.sub(r"\\u([0-9a-fA-F]{4})", _hex_to_char, txt)
|
|
|
|
|
|
|
|
|
|
|
|
def _hex_to_char(match):
    """Return the character for a matched 4-digit hex codepoint"""
    codepoint = int(match[1], 16)
    return chr(codepoint)
|
|
|
|
|
|
|
|
|
2018-04-20 14:53:21 +02:00
|
|
|
def parse_bytes(value, default=0, suffixes="bkmgtp"):
    """Convert a bytes-amount ("500k", "2.5M", ...) to int

    Each position in 'suffixes' stands for a power of 1024
    (b=1024**0, k=1024**1, ...). 'default' is returned for input
    that cannot be interpreted.
    """
    try:
        suffix = value[-1].lower()
    except (TypeError, KeyError, IndexError):
        return default

    if suffix in suffixes:
        value = value[:-1]
        multiplier = 1024 ** suffixes.index(suffix)
    else:
        multiplier = 1

    try:
        return round(float(value) * multiplier)
    except ValueError:
        return default
|
|
|
|
|
|
|
|
|
|
|
|
def parse_int(value, default=0):
    """Convert 'value' to int

    Falsy input (None, "", 0, ...) and values int() rejects
    both yield 'default'.
    """
    if value:
        try:
            return int(value)
        except (ValueError, TypeError):
            pass
    return default
|
|
|
|
|
|
|
|
|
2019-01-29 13:14:30 +01:00
|
|
|
def parse_float(value, default=0.0):
    """Convert 'value' to float

    Falsy input (None, "", 0, ...) and values float() rejects
    both yield 'default'.
    """
    if value:
        try:
            return float(value)
        except (ValueError, TypeError):
            pass
    return default
|
|
|
|
|
|
|
|
|
2017-08-24 20:55:58 +02:00
|
|
|
def parse_query(qs):
    """Parse a query string into key-value pairs

    Only the first occurrence of each key is kept; invalid input
    yields an empty dict.
    """
    result = {}
    try:
        for key, value in urllib.parse.parse_qsl(qs):
            result.setdefault(key, value)
    except AttributeError:
        pass
    return result
|
2017-08-24 20:55:58 +02:00
|
|
|
|
|
|
|
|
2019-04-21 15:28:27 +02:00
|
|
|
def parse_timestamp(ts, default=None):
    """Create a datetime object from a unix timestamp

    Args:
        ts: Unix timestamp as int or numeric string
        default: Value returned when 'ts' cannot be converted

    Returns:
        A timezone-naive datetime in UTC, or 'default' on failure.
    """
    try:
        # fromtimestamp() with an explicit UTC timezone instead of the
        # deprecated utcfromtimestamp(); drop tzinfo afterwards to keep
        # returning a naive datetime as before
        return datetime.datetime.fromtimestamp(
            int(ts), datetime.timezone.utc).replace(tzinfo=None)
    except (TypeError, ValueError, OverflowError):
        return default
|
|
|
|
|
|
|
|
|
2020-04-11 02:05:00 +02:00
|
|
|
def parse_datetime(date_string, format="%Y-%m-%dT%H:%M:%S%z", utcoffset=0):
    """Create a datetime object by parsing 'date_string'

    Args:
        date_string: String to parse with strptime()
        format: strptime() format string; when it ends in "%z", a
            trailing "+HH:MM" offset in 'date_string' is rewritten to
            "+HHMM" first
        utcoffset: Manual UTC offset in hours, applied only when the
            parsed datetime carries no timezone information of its own

    Returns:
        A timezone-naive datetime (converted to UTC) on success;
        None when the arguments are malformed (TypeError / IndexError /
        KeyError); the original 'date_string' when it does not match
        'format' (ValueError / OverflowError).
    """
    try:
        if format.endswith("%z") and date_string[-3] == ":":
            # workaround for Python < 3.7: +00:00 -> +0000
            ds = date_string[:-3] + date_string[-2:]
        else:
            ds = date_string
        d = datetime.datetime.strptime(ds, format)
        o = d.utcoffset()
        if o is not None:
            # convert to naive UTC
            d = d.replace(tzinfo=None) - o
        elif utcoffset:
            # apply manual UTC offset
            d += datetime.timedelta(0, utcoffset * -3600)
        return d
    except (TypeError, IndexError, KeyError):
        return None
    except (ValueError, OverflowError):
        return date_string
|
|
|
|
|
|
|
|
|
2018-04-20 14:53:21 +02:00
|
|
|
# module-level shortcuts for frequently used stdlib functions
urljoin = urllib.parse.urljoin

quote = urllib.parse.quote
unquote = urllib.parse.unquote

escape = html.escape
unescape = html.unescape
|