2017-02-20 22:02:49 +01:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2022-02-22 23:02:13 +01:00
|
|
|
# Copyright 2017-2022 Mike Fährmann
|
2017-02-20 22:02:49 +01:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License version 2 as
|
|
|
|
# published by the Free Software Foundation.
|
|
|
|
|
2017-03-28 13:12:44 +02:00
|
|
|
"""Utility functions and classes"""
|
2017-02-20 22:02:49 +01:00
|
|
|
|
2017-10-09 22:12:58 +02:00
|
|
|
import re
|
2017-03-28 13:12:44 +02:00
|
|
|
import os
|
2017-02-23 21:51:29 +01:00
|
|
|
import sys
|
2019-05-09 16:22:06 +02:00
|
|
|
import json
|
2022-02-22 23:02:13 +01:00
|
|
|
import time
|
2020-10-24 02:55:42 +02:00
|
|
|
import random
|
2018-01-29 22:13:06 +01:00
|
|
|
import sqlite3
|
2021-01-14 03:40:08 +01:00
|
|
|
import binascii
|
2017-10-03 22:38:48 +02:00
|
|
|
import datetime
|
2021-06-03 22:34:58 +02:00
|
|
|
import functools
|
2017-12-03 01:38:24 +01:00
|
|
|
import itertools
|
2022-11-04 17:35:47 +01:00
|
|
|
import subprocess
|
2017-06-16 21:01:40 +02:00
|
|
|
import urllib.parse
|
2020-01-21 21:59:36 +01:00
|
|
|
from http.cookiejar import Cookie
|
2022-02-22 23:02:13 +01:00
|
|
|
from email.utils import mktime_tz, parsedate_tz
|
2022-05-04 12:09:56 +02:00
|
|
|
from . import text, exception
|
2017-02-20 22:02:49 +01:00
|
|
|
|
|
|
|
|
2018-03-14 13:17:34 +01:00
|
|
|
def bencode(num, alphabet="0123456789"):
    """Encode an integer into a base-N encoded string.

    N is the length of 'alphabet'; 0 encodes to the empty string.
    """
    base = len(alphabet)
    digits = []
    while num:
        num, rem = divmod(num, base)
        digits.append(alphabet[rem])
    digits.reverse()
    return "".join(digits)
|
|
|
|
|
|
|
|
|
2017-06-01 18:14:33 +02:00
|
|
|
def bdecode(data, alphabet="0123456789"):
    """Decode a base-N encoded string ( N = len(alphabet) )"""
    base = len(alphabet)
    index = alphabet.index
    result = 0
    for char in data:
        result = result * base + index(char)
    return result
|
|
|
|
|
|
|
|
|
2017-12-03 01:38:24 +01:00
|
|
|
def advance(iterable, num):
    """Advance 'iterable' by 'num' steps and return the resulting iterator.

    If 'iterable' has fewer than 'num' items, the returned iterator
    is exhausted. (Fixes the original quadruple-quote docstring typo.)
    """
    iterator = iter(iterable)
    # islice(it, num, num) yields nothing but consumes 'num' items;
    # the default argument to next() absorbs early exhaustion
    next(itertools.islice(iterator, num, num), None)
    return iterator
|
|
|
|
|
|
|
|
|
2020-10-29 23:26:11 +01:00
|
|
|
def unique(iterable):
    """Yield unique elements from 'iterable' while preserving order"""
    seen = set()
    for item in iterable:
        if item in seen:
            continue
        seen.add(item)
        yield item
|
|
|
|
|
|
|
|
|
2021-03-02 23:01:38 +01:00
|
|
|
def unique_sequence(iterable):
    """Yield sequentially unique elements from 'iterable'.

    Only consecutive duplicates are dropped; a repeated element that
    re-appears later is yielded again.
    """
    previous = None
    for item in iterable:
        if item == previous:
            continue
        previous = item
        yield item
|
|
|
|
|
|
|
|
|
2022-04-08 14:40:26 +02:00
|
|
|
def contains(values, elements, separator=" "):
    """Return True if at least one of 'elements' is contained in 'values'.

    A string 'values' is split on 'separator' first; an 'elements'
    argument that is not a tuple or list is treated as a single element.
    """
    if isinstance(values, str):
        values = values.split(separator)

    if isinstance(elements, (tuple, list)):
        return any(element in values for element in elements)
    return elements in values
|
|
|
|
|
|
|
|
|
2019-10-28 15:06:17 +01:00
|
|
|
def raises(cls):
    """Return a function that raises 'cls' with the arguments it is given."""
    def raise_exception(*args):
        raise cls(*args)
    return raise_exception
|
|
|
|
|
|
|
|
|
2021-05-04 18:00:38 +02:00
|
|
|
def identity(x):
    """Return the argument unchanged"""
    return x
|
|
|
|
|
|
|
|
|
2021-11-23 19:23:02 +01:00
|
|
|
def true(_):
    """Return True regardless of the argument"""
    return True
|
|
|
|
|
|
|
|
|
|
|
|
def false(_):
    """Return False regardless of the argument"""
    return False
|
|
|
|
|
|
|
|
|
2021-05-04 18:00:38 +02:00
|
|
|
def noop():
    """Do nothing; placeholder callback"""
|
|
|
|
|
|
|
|
|
2021-01-11 22:12:40 +01:00
|
|
|
def generate_token(size=16):
    """Generate a random token with hexadecimal digits.

    'size' is the number of random bytes; the returned string is
    twice as long. Uses 'random', so NOT cryptographically secure.
    """
    raw = random.getrandbits(size * 8).to_bytes(size, "big")
    return binascii.hexlify(raw).decode()
|
2020-10-15 00:43:26 +02:00
|
|
|
|
|
|
|
|
2021-09-28 23:07:55 +02:00
|
|
|
def format_value(value, suffixes="kMGTPEZY"):
    """Format a number with a metric suffix, e.g. 12345 -> '12.34k'.

    Values below 1000 are returned unchanged (as a string).
    """
    text = format(value)
    length = len(text)
    if length < 4:
        return text
    # position of the decimal point within the leading digits
    cut = (length - 1) % 3 + 1
    return "{}.{}{}".format(
        text[:cut], text[cut:cut+2], suffixes[(length - 4) // 3])
|
2021-07-26 01:55:02 +02:00
|
|
|
|
|
|
|
|
2017-08-12 20:07:27 +02:00
|
|
|
def combine_dict(a, b):
    """Recursively combine the contents of 'b' into 'a'.

    Nested dicts present in both are merged; every other key in 'b'
    overwrites the corresponding entry in 'a'. Returns the mutated 'a'.
    """
    for key, new_value in b.items():
        old_value = a.get(key)
        if isinstance(new_value, dict) and isinstance(old_value, dict):
            combine_dict(old_value, new_value)
        else:
            a[key] = new_value
    return a
|
2017-08-12 20:07:27 +02:00
|
|
|
|
|
|
|
|
2018-10-08 20:28:54 +02:00
|
|
|
def transform_dict(a, func):
    """Recursively apply 'func' to all non-dict values in 'a' (in place)."""
    for key in a:
        value = a[key]
        if isinstance(value, dict):
            transform_dict(value, func)
        else:
            a[key] = func(value)
|
|
|
|
|
|
|
|
|
2019-11-21 16:57:39 +01:00
|
|
|
def filter_dict(a):
    """Return a copy of 'a' without "private" entries.

    An entry is private if its key starts with an underscore.
    """
    result = {}
    for key, value in a.items():
        if key[0] != "_":
            result[key] = value
    return result
|
|
|
|
|
|
|
|
|
2020-06-06 23:49:49 +02:00
|
|
|
def delete_items(obj, keys):
    """Remove all 'keys' from 'obj' (in place); missing keys are ignored."""
    for key in keys:
        # LBYL on purpose: 'obj' may be any container supporting
        # 'in' and 'del', not only a dict
        if key in obj:
            del obj[key]
|
|
|
|
|
|
|
|
|
2021-09-18 02:15:42 +02:00
|
|
|
def enumerate_reversed(iterable, start=0, length=None):
    """Enumerate 'iterable' and return its (index, element) pairs reversed.

    'length' must be given when 'iterable' has no len().
    """
    if length is None:
        length = len(iterable)

    try:
        # sequence protocol available: pair a descending index range
        # with reversed() without copying
        return zip(range(start + length - 1, start - 1, -1),
                   reversed(iterable))
    except TypeError:
        # plain iterator: materialize the pairs, then reverse in place
        pairs = list(zip(range(start, start + length), iterable))
        pairs.reverse()
        return pairs
|
2021-09-18 02:15:42 +02:00
|
|
|
|
|
|
|
|
2019-02-14 11:15:19 +01:00
|
|
|
def number_to_string(value, numbers=(int, float)):
    """Convert plain numbers (int, float) to str; return everything else as is.

    Uses an exact type check, so bool and int/float subclasses are
    deliberately NOT converted.
    """
    if value.__class__ in numbers:
        return str(value)
    return value
|
2018-10-08 20:28:54 +02:00
|
|
|
|
|
|
|
|
2019-03-04 21:13:34 +01:00
|
|
|
def to_string(value):
    """str() with "better" defaults.

    Falsy values become "", lists are joined with ", ".
    """
    if not value:
        return ""
    if value.__class__ is not list:
        return str(value)
    try:
        return ", ".join(value)
    except Exception:
        # list contains non-string items
        return ", ".join(map(str, value))
|
|
|
|
|
|
|
|
|
2022-03-23 22:20:37 +01:00
|
|
|
def datetime_to_timestamp(dt):
    """Convert naive UTC datetime to a float Unix timestamp."""
    delta = dt - EPOCH
    return delta / SECOND
|
|
|
|
|
|
|
|
|
|
|
|
def datetime_to_timestamp_string(dt):
    """Convert naive UTC datetime to an integer timestamp string.

    Returns "" for anything that cannot be converted (e.g. None).
    """
    try:
        seconds = (dt - EPOCH) // SECOND
    except Exception:
        return ""
    return str(seconds)
|
|
|
|
|
|
|
|
|
2019-05-09 16:22:06 +02:00
|
|
|
def dump_json(obj, fp=sys.stdout, ensure_ascii=True, indent=4):
    """Serialize 'obj' as pretty-printed JSON and write it to 'fp'.

    Keys are emitted in sorted order; non-serializable objects are
    converted with str(). A trailing newline is appended.
    """
    json.dump(
        obj, fp,
        sort_keys=True,
        indent=indent,
        default=str,
        ensure_ascii=ensure_ascii,
    )
    fp.write("\n")
|
|
|
|
|
|
|
|
|
2020-06-18 15:07:30 +02:00
|
|
|
def dump_response(response, fp, *,
                  headers=False, content=True, hide_auth=True):
    """Write the contents of 'response' into a file-like object.

    'fp' must be opened in binary mode - all writes are bytes.
    'response' is expected to provide .request, .headers, .content etc.
    (presumably a requests.Response - confirm at call sites).
    When 'hide_auth' is set, Authorization, Cookie, and Set-Cookie
    header values are masked with '***' before being written.
    """

    if headers:
        request = response.request
        req_headers = request.headers.copy()
        res_headers = response.headers.copy()
        outfmt = """\
{request.method} {request.url}
Status: {response.status_code} {response.reason}

Request Headers
---------------
{request_headers}

Response Headers
----------------
{response_headers}
"""
        if hide_auth:
            # mask credentials: keep the auth scheme, hide the token
            authorization = req_headers.get("Authorization")
            if authorization:
                atype, sep, _ = authorization.partition(" ")
                req_headers["Authorization"] = atype + " ***" if sep else "***"

            # keep cookie names, hide their values
            cookie = req_headers.get("Cookie")
            if cookie:
                req_headers["Cookie"] = ";".join(
                    c.partition("=")[0] + "=***"
                    for c in cookie.split(";")
                )

            # same for cookies the server is trying to set
            set_cookie = res_headers.get("Set-Cookie")
            if set_cookie:
                res_headers["Set-Cookie"] = re.sub(
                    r"(^|, )([^ =]+)=[^,;]*", r"\1\2=***", set_cookie,
                )

        fp.write(outfmt.format(
            request=request,
            response=response,
            request_headers="\n".join(
                name + ": " + value
                for name, value in req_headers.items()
            ),
            response_headers="\n".join(
                name + ": " + value
                for name, value in res_headers.items()
            ),
        ).encode())

    if content:
        if headers:
            # separator between the header dump and the body
            fp.write(b"\nContent\n-------\n")
        fp.write(response.content)
|
|
|
|
|
|
|
|
|
2022-11-04 17:35:47 +01:00
|
|
|
@functools.lru_cache(maxsize=None)
def git_head():
    """Return the short hash of the current git HEAD, or None.

    Runs 'git rev-parse' in this module's directory; the result is
    cached for the lifetime of the process. Returns None when git is
    unavailable, fails, or prints anything to stderr.
    """
    try:
        out, err = subprocess.Popen(
            ("git", "rev-parse", "--short", "HEAD"),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)),
        ).communicate()
        if out and not err:
            return out.decode().rstrip()
    except (OSError, subprocess.SubprocessError):
        # git not installed or not executable
        pass
    return None
|
|
|
|
|
|
|
|
|
2017-10-26 00:04:28 +02:00
|
|
|
def expand_path(path):
    """Expand environment variables and tildes (~) in 'path'.

    'path' may also be a sequence of path segments, which are joined
    before expansion; falsy values are returned unchanged.
    """
    if not path:
        return path
    if isinstance(path, str):
        joined = path
    else:
        joined = os.path.join(*path)
    return os.path.expandvars(os.path.expanduser(joined))
|
|
|
|
|
|
|
|
|
2020-01-17 23:51:07 +01:00
|
|
|
def remove_file(path):
    """Delete the file at 'path'; ignore errors (e.g. missing file)."""
    try:
        os.unlink(path)
    except OSError:
        pass
|
|
|
|
|
|
|
|
|
|
|
|
def remove_directory(path):
    """Delete the directory at 'path'; ignore errors (e.g. not empty)."""
    try:
        os.rmdir(path)
    except OSError:
        pass
|
|
|
|
|
|
|
|
|
2022-02-22 23:02:13 +01:00
|
|
|
def set_mtime(path, mtime):
    """Set the modification time of 'path' to 'mtime' (best effort).

    'mtime' is either a Unix timestamp or an RFC 2822 date string
    (e.g. an HTTP 'Last-Modified' header value). The access time is
    set to the current time. All failures are silently ignored.
    """
    try:
        if isinstance(mtime, str):
            mtime = mktime_tz(parsedate_tz(mtime))
        os.utime(path, (time.time(), mtime))
    except Exception:
        # invalid date string or filesystem error - deliberately best-effort
        pass
|
|
|
|
|
|
|
|
|
2022-05-06 13:21:29 +02:00
|
|
|
def cookiestxt_load(fp, cookiejar):
    """Parse a Netscape cookies.txt file and add its Cookies to 'cookiejar'.

    Lines are expected to contain 7 tab-separated fields:
    domain, domain_specified, path, secure, expires, name, value.
    Comment lines ('#', '$') and empty lines are skipped, except for
    the '#HttpOnly_' prefix, which marks a valid cookie line.
    """
    set_cookie = cookiejar.set_cookie

    for line in fp:

        line = line.lstrip(" ")

        # strip '#HttpOnly_'
        if line.startswith("#HttpOnly_"):
            line = line[10:]

        # ignore empty lines and comments
        if not line or line[0] in ("#", "$", "\n"):
            continue

        # strip trailing '\n'
        if line[-1] == "\n":
            line = line[:-1]

        domain, domain_specified, path, secure, expires, name, value = \
            line.split("\t")

        # cookies.txt stores nameless cookies with the value in the
        # 'name' column
        if not name:
            name = value
            value = None

        set_cookie(Cookie(
            0, name, value,               # version, name, value
            None, False,                  # port, port_specified
            domain,
            domain_specified == "TRUE",
            domain.startswith("."),       # domain_initial_dot
            path, False,                  # path, path_specified
            secure == "TRUE",
            None if expires == "0" or not expires else expires,
            False, None, None, {},        # discard, comment, comment_url, rest
        ))
|
2020-01-21 21:59:36 +01:00
|
|
|
|
|
|
|
|
2022-05-06 13:21:29 +02:00
|
|
|
def cookiestxt_store(fp, cookies):
    """Write 'cookies' in Netscape cookies.txt format to 'fp'"""
    fp.write("# Netscape HTTP Cookie File\n\n")

    for cookie in cookies:
        # cookies without a domain cannot be represented
        if not cookie.domain:
            continue

        name = cookie.name
        value = cookie.value
        if value is None:
            # nameless cookies store their value in the 'name' column
            name, value = "", name

        domain = cookie.domain
        expires = cookie.expires
        fp.write("\t".join((
            domain,
            "TRUE" if domain.startswith(".") else "FALSE",
            cookie.path,
            "TRUE" if cookie.secure else "FALSE",
            "0" if expires is None else str(expires),
            name,
            value + "\n",
        )))
|
2020-01-21 21:59:36 +01:00
|
|
|
|
|
|
|
|
2017-08-08 19:22:04 +02:00
|
|
|
def code_to_language(code, default=None):
    """Map an ISO 639-1 language code to its actual name"""
    key = code.lower() if code else ""
    return CODES.get(key, default)
|
2017-03-28 13:12:44 +02:00
|
|
|
|
|
|
|
|
2017-08-08 19:22:04 +02:00
|
|
|
def language_to_code(lang, default=None):
    """Map a language name to its ISO 639-1 code"""
    if lang is None:
        return default
    wanted = lang.capitalize()
    return next(
        (code for code, language in CODES.items() if language == wanted),
        default,
    )
|
|
|
|
|
|
|
|
|
2017-06-16 21:01:40 +02:00
|
|
|
# Mapping of ISO 639-1 language codes to English language names,
# used by code_to_language() and language_to_code()
CODES = {
    "ar": "Arabic",
    "bg": "Bulgarian",
    "ca": "Catalan",
    "cs": "Czech",
    "da": "Danish",
    "de": "German",
    "el": "Greek",
    "en": "English",
    "es": "Spanish",
    "fi": "Finnish",
    "fr": "French",
    "he": "Hebrew",
    "hu": "Hungarian",
    "id": "Indonesian",
    "it": "Italian",
    "ja": "Japanese",
    "ko": "Korean",
    "ms": "Malay",
    "nl": "Dutch",
    "no": "Norwegian",
    "pl": "Polish",
    "pt": "Portuguese",
    "ro": "Romanian",
    "ru": "Russian",
    "sv": "Swedish",
    "th": "Thai",
    "tr": "Turkish",
    "vi": "Vietnamese",
    "zh": "Chinese",
}
|
|
|
|
|
|
|
|
|
2022-10-07 11:55:37 +02:00
|
|
|
def parse_inputfile(file, log):
    """Filter and process strings from an input file.

    Lines starting with '#' and empty lines will be ignored.
    Lines starting with '-' will be interpreted as a key-value pair separated
    by an '='. where 'key' is a dot-separated option name and 'value' is a
    JSON-parsable value. These configuration options will be applied while
    processing the next URL.
    Lines starting with '-G' are the same as above, except these options will
    be applied for *all* following URLs, i.e. they are Global.
    Everything else will be used as a potential URL.

    Example input file:

    # settings global options
    -G base-directory = "/tmp/"
    -G skip = false

    # setting local options for the next URL
    -filename="spaces_are_optional.jpg"
    -skip = true

    https://example.org/

    # next URL uses default filename and 'skip' is false.
    https://example.com/index.htm # comment1
    https://example.com/404.htm # comment2
    """
    gconf = []  # global options, applied to every following URL
    lconf = []  # local options, applied only to the next URL
    strip_comment = None  # lazily compiled regex substitution

    for line in file:
        line = line.strip()

        if not line or line[0] == "#":
            # empty line or comment
            continue

        elif line[0] == "-":
            # config spec
            if len(line) >= 2 and line[1] == "G":
                conf = gconf
                line = line[2:]
            else:
                conf = lconf
                line = line[1:]

            key, sep, value = line.partition("=")
            if not sep:
                log.warning("input file: invalid <key>=<value> pair: %s", line)
                continue

            try:
                value = json.loads(value.strip())
            except ValueError as exc:
                log.warning("input file: unable to parse '%s': %s", value, exc)
                continue

            # split the dot-separated option name into (path, key, value)
            key = key.strip().split(".")
            conf.append((key[:-1], key[-1], value))

        else:
            # url
            if " #" in line or "\t#" in line:
                # strip a trailing inline comment
                if strip_comment is None:
                    strip_comment = re.compile(r"\s+#.*").sub
                line = strip_comment("", line)
            if gconf or lconf:
                yield ExtendedUrl(line, gconf, lconf)
                gconf = []
                lconf = []
            else:
                yield line
|
|
|
|
|
|
|
|
|
2019-02-13 17:39:43 +01:00
|
|
|
class UniversalNone():
    """None-style object that supports more operations than None itself.

    Attribute and item access both return the object itself, so chained
    lookups like NONE.foo["bar"].baz never raise. It is falsy and
    stringifies to "None", just like the real None.
    """
    __slots__ = ()

    def __getattribute__(self, _):
        # every attribute lookup yields the object itself
        return self

    def __getitem__(self, _):
        # every subscript yields the object itself
        return self

    @staticmethod
    def __bool__():
        return False

    @staticmethod
    def __str__():
        return "None"

    __repr__ = __str__
|
|
|
|
|
|
|
|
|
|
|
|
# shared singleton used as a chainable None substitute
NONE = UniversalNone()
# reference point and unit for naive-UTC timestamp conversions
EPOCH = datetime.datetime(1970, 1, 1)
SECOND = datetime.timedelta(0, 1)
# True when running on Windows
WINDOWS = (os.name == "nt")
# unique marker object for "no value given"
SENTINEL = object()
# extractor categories with special handling
SPECIAL_EXTRACTORS = {"oauth", "recursive", "test"}
# names available inside user-supplied filter expressions
# (see compile_expression())
GLOBALS = {
    "contains" : contains,
    "parse_int": text.parse_int,
    "urlsplit" : urllib.parse.urlsplit,
    "datetime" : datetime.datetime,
    "timedelta": datetime.timedelta,
    "abort"    : raises(exception.StopExtraction),
    "terminate": raises(exception.TerminateExtraction),
    "re"       : re,
}
|
|
|
|
|
|
|
|
|
|
|
|
def compile_expression(expr, name="<expr>", globals=GLOBALS):
    """Compile 'expr' and return a callable evaluating it.

    The returned callable takes a mapping that is used as the local
    namespace for evaluation. NOTE: this is eval() on user-supplied
    expressions - callers must treat 'expr' as trusted input.
    """
    code_object = compile(expr, name, "eval")
    return functools.partial(eval, code_object, globals)
|
2019-02-13 17:39:43 +01:00
|
|
|
|
|
|
|
|
2021-09-14 17:40:05 +02:00
|
|
|
def build_duration_func(duration, min=0.0):
    """Build a no-argument callable that yields a sleep duration.

    'duration' may be a number, a (lower, upper) pair, or a string
    like "2.5" or "2.5-5.0"; ranges produce uniformly random values.
    Results never fall below 'min'. Returns None when neither
    'duration' nor 'min' is set.
    """
    if not duration:
        # no duration given: fall back to a constant 'min', or disable
        return (lambda: min) if min else None

    if isinstance(duration, str):
        lower, _, upper = duration.partition("-")
        lower = float(lower)
    else:
        try:
            lower, upper = duration
        except TypeError:
            # a single number
            lower, upper = duration, None

    if upper:
        upper = float(upper)
        # clamp both bounds to 'min' before sampling
        return functools.partial(
            random.uniform, max(lower, min), max(upper, min))

    if lower < min:
        lower = min
    return lambda: lower
|
2021-09-14 17:40:05 +02:00
|
|
|
|
|
|
|
|
2021-11-23 19:23:02 +01:00
|
|
|
def build_extractor_filter(categories, negate=True, special=None):
    """Build a function that takes an Extractor class as argument
    and returns True if that class is allowed by 'categories'.

    'categories' is a comma-separated string or an iterable of
    "category", "category:subcategory", or ":subcategory" items
    ('*' acts as a wildcard on either side). With 'negate' (default)
    the listed items are EXCLUDED; otherwise they are INCLUDED.
    'special' adds extra category names that are always kept.
    """
    if isinstance(categories, str):
        categories = categories.split(",")

    catset = set()  # set of categories / basecategories
    subset = set()  # set of subcategories
    catsub = []     # list of category-subcategory pairs

    for item in categories:
        category, _, subcategory = item.partition(":")
        if category and category != "*":
            if subcategory and subcategory != "*":
                catsub.append((category, subcategory))
            else:
                catset.add(category)
        elif subcategory and subcategory != "*":
            subset.add(subcategory)

    if special:
        catset |= special
    elif not catset and not subset and not catsub:
        # nothing to match against: constant result
        return true if negate else false

    tests = []

    if negate:
        if catset:
            tests.append(lambda extr:
                         extr.category not in catset and
                         extr.basecategory not in catset)
        if subset:
            tests.append(lambda extr: extr.subcategory not in subset)
    else:
        if catset:
            tests.append(lambda extr:
                         extr.category in catset or
                         extr.basecategory in catset)
        if subset:
            tests.append(lambda extr: extr.subcategory in subset)

    if catsub:
        # exact (category, subcategory) pair matches
        def test(extr):
            for category, subcategory in catsub:
                if category in (extr.category, extr.basecategory) and \
                        subcategory == extr.subcategory:
                    return not negate
            return negate
        tests.append(test)

    if len(tests) == 1:
        return tests[0]
    # negated filters must pass every test; inclusive ones need any
    if negate:
        return lambda extr: all(t(extr) for t in tests)
    else:
        return lambda extr: any(t(extr) for t in tests)
|
|
|
|
|
|
|
|
|
2022-03-10 23:32:16 +01:00
|
|
|
def build_proxy_map(proxies, log=None):
    """Generate a proxy map.

    Accepts a single proxy URL (used for both http and https) or a
    scheme-to-proxy dict; entries without a scheme get 'http://'
    prepended. Invalid input is logged and yields None.
    """
    if not proxies:
        return None

    if isinstance(proxies, str):
        if "://" not in proxies:
            proxies = "http://" + proxies.lstrip("/")
        return {"http": proxies, "https": proxies}

    if isinstance(proxies, dict):
        for scheme in proxies:
            proxy = proxies[scheme]
            if "://" not in proxy:
                proxies[scheme] = "http://" + proxy.lstrip("/")
        return proxies

    if log:
        log.warning("invalid proxy specifier: %s", proxies)
|
|
|
|
|
|
|
|
|
2017-09-06 17:08:50 +02:00
|
|
|
def build_predicate(predicates):
    """Combine a list of predicates into a single callable."""
    if not predicates:
        # nothing to check: accept everything
        return lambda url, kwdict: True
    if len(predicates) == 1:
        return predicates[0]
    return functools.partial(chain_predicates, predicates)
|
|
|
|
|
|
|
|
|
|
|
|
def chain_predicates(predicates, url, kwdict):
    """Return True only if every predicate accepts (url, kwdict)."""
    return all(pred(url, kwdict) for pred in predicates)
|
2017-09-06 17:08:50 +02:00
|
|
|
|
|
|
|
|
2017-02-23 21:51:29 +01:00
|
|
|
class RangePredicate():
    """Predicate; True if the current index is in the given range"""

    def __init__(self, rangespec):
        self.ranges = self.optimize_range(self.parse_range(rangespec))
        self.index = 0

        if self.ranges:
            self.lower = self.ranges[0][0]
            self.upper = self.ranges[-1][1]
        else:
            self.lower = self.upper = 0

    def __call__(self, url, _):
        self.index += 1

        # past the last range: no further match is possible
        if self.index > self.upper:
            raise exception.StopExtraction()

        index = self.index
        return any(beg <= index <= end for beg, end in self.ranges)

    @staticmethod
    def parse_range(rangespec):
        """Parse an integer range string and return the resulting ranges

        Examples:
            parse_range("-2,4,6-8,10-") -> [(1,2), (4,4), (6,8), (10,INTMAX)]
            parse_range(" - 3 , 4- 4, 2-6") -> [(1,3), (4,4), (2,6)]
        """
        ranges = []

        for part in rangespec.split(","):
            if not part:
                continue
            first, sep, last = part.partition("-")
            if sep:
                # open ends default to 1 and sys.maxsize
                beg = int(first) if first.strip() else 1
                end = int(last) if last.strip() else sys.maxsize
            else:
                beg = end = int(first)
            ranges.append((beg, end) if beg <= end else (end, beg))

        return ranges

    @staticmethod
    def optimize_range(ranges):
        """Simplify/Combine a parsed list of ranges

        Examples:
            optimize_range([(2,4), (4,6), (5,8)]) -> [(2,8)]
            optimize_range([(1,1), (2,2), (3,6), (8,9))]) -> [(1,6), (8,9)]
        """
        if len(ranges) <= 1:
            return ranges

        ranges.sort()
        merged = []

        riter = iter(ranges)
        beg, end = next(riter)
        for lo, hi in riter:
            if lo > end + 1:
                # disjoint: flush the current range
                merged.append((beg, end))
                beg, end = lo, hi
            elif hi > end:
                # overlapping or adjacent: extend the current range
                end = hi
        merged.append((beg, end))

        return merged
|
|
|
|
|
2017-03-28 13:12:44 +02:00
|
|
|
|
2017-09-06 17:08:50 +02:00
|
|
|
class UniquePredicate():
    """Predicate; True if given URL has not been encountered before"""

    def __init__(self):
        self.urls = set()

    def __call__(self, url, _):
        # synthetic 'text:' URLs may legitimately repeat
        if url.startswith("text:"):
            return True
        if url in self.urls:
            return False
        self.urls.add(url)
        return True
|
|
|
|
|
|
|
|
|
2017-09-08 17:52:00 +02:00
|
|
|
class FilterPredicate():
    """Predicate; True if evaluating the given expression returns True.

    'expr' is compiled once; on each call it is evaluated with the
    URL's keyword dict as namespace. Evaluation errors are wrapped in
    exception.FilterError, while GalleryDLExceptions (e.g. abort())
    propagate unchanged.
    """

    def __init__(self, expr, target="image"):
        # 'target' only affects the filename shown in tracebacks
        name = "<{} filter>".format(target)
        self.expr = compile_expression(expr, name)

    def __call__(self, _, kwdict):
        try:
            return self.expr(kwdict)
        except exception.GalleryDLException:
            # control-flow exceptions must not be converted
            raise
        except Exception as exc:
            raise exception.FilterError(exc)
|
|
|
|
|
|
|
|
|
2018-02-07 21:47:27 +01:00
|
|
|
class ExtendedUrl():
    """URL with attached config key-value pairs"""

    def __init__(self, url, gconf, lconf):
        self.value = url
        self.gconfig = gconf
        self.lconfig = lconf

    def __str__(self):
        return self.value
|
|
|
|
|
|
|
|
|
2018-01-29 22:13:06 +01:00
|
|
|
class DownloadArchive():
    """SQLite-backed record of already-downloaded items.

    Keys are generated from an item's kwdict via 'format_string' and
    stored in a single-column 'archive' table. 'cache_key' names the
    kwdict entry used to carry a computed key from check() to add().
    """

    def __init__(self, path, format_string, cache_key="_archive_key"):
        try:
            con = sqlite3.connect(path, timeout=60, check_same_thread=False)
        except sqlite3.OperationalError:
            # parent directory does not exist yet - create it and retry
            os.makedirs(os.path.dirname(path))
            con = sqlite3.connect(path, timeout=60, check_same_thread=False)
        # autocommit mode
        con.isolation_level = None

        self.close = con.close
        self.cursor = con.cursor()

        # imported here to avoid a circular import at module load time
        from . import formatter
        self.keygen = formatter.parse(format_string).format_map
        self._cache_key = cache_key

        try:
            self.cursor.execute("CREATE TABLE IF NOT EXISTS archive "
                                "(entry TEXT PRIMARY KEY) WITHOUT ROWID")
        except sqlite3.OperationalError:
            # fallback for missing WITHOUT ROWID support (#553)
            self.cursor.execute("CREATE TABLE IF NOT EXISTS archive "
                                "(entry TEXT PRIMARY KEY)")

    def check(self, kwdict):
        """Return True if the item described by 'kwdict' exists in archive"""
        # cache the generated key in 'kwdict' for a later add()
        key = kwdict[self._cache_key] = self.keygen(kwdict)
        self.cursor.execute(
            "SELECT 1 FROM archive WHERE entry=? LIMIT 1", (key,))
        return self.cursor.fetchone()

    def add(self, kwdict):
        """Add item described by 'kwdict' to archive"""
        # reuse the key cached by check() when available
        key = kwdict.get(self._cache_key) or self.keygen(kwdict)
        self.cursor.execute(
            "INSERT OR IGNORE INTO archive (entry) VALUES (?)", (key,))