# -*- coding: utf-8 -*-

# Copyright 2015-2018 Mike Fährmann
|
2015-04-05 16:23:20 +02:00
|
|
|
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import sys
import time
import json
import hashlib
import logging
from . import extractor, downloader, postprocessor
from . import config, text, util, output, exception
from .extractor.message import Message


class Job():
    """Base class for Job-types"""
    ulog = None

    def __init__(self, url, parent=None):
        self.url = url
        self.extractor = extractor.find(url)
        if self.extractor is None:
            raise exception.NoExtractorError(url)
        self.extractor.log.debug(
            "Using %s for '%s'", self.extractor.__class__.__name__, url)

        # url predicates
        self.pred_url = self._prepare_predicates(
            "image", [util.UniquePredicate()], True)

        # queue predicates
        self.pred_queue = self._prepare_predicates(
            "chapter", [], False)

        # category transfer
        if parent and parent.extractor.categorytransfer:
            self.extractor.category = parent.extractor.category
            self.extractor.subcategory = parent.extractor.subcategory

        # user-supplied metadata
        self.userkwds = self.extractor.config("keywords")

    def run(self):
        """Execute the job"""
        try:
            log = self.extractor.log
            for msg in self.extractor:
                self.dispatch(msg)
        except exception.AuthenticationError as exc:
            msg = str(exc) or "Please provide a valid username/password pair."
            log.error("Authentication failed: %s", msg)
        except exception.AuthorizationError:
            log.error("You do not have permission to access the resource "
                      "at '%s'", self.url)
        except exception.NotFoundError as exc:
            res = str(exc) or "resource (gallery/image/user)"
            log.error("The %s at '%s' does not exist", res, self.url)
        except exception.HttpError as exc:
            err = exc.args[0]
            if isinstance(err, Exception):
                err = "{}: {}".format(err.__class__.__name__, err)
            log.error("HTTP request failed: %s", err)
        except exception.FormatError as exc:
            err, obj = exc.args
            log.error("Applying %s format string failed: %s: %s",
                      obj, err.__class__.__name__, err)
        except exception.FilterError as exc:
            err = exc.args[0]
            log.error("Evaluating filter expression failed: %s: %s",
                      err.__class__.__name__, err)
        except exception.StopExtraction:
            pass
        except OSError as exc:
            log.error("Unable to download data: %s", exc)
        except Exception as exc:
            log.error(("An unexpected error occurred: %s - %s. "
                       "Please run gallery-dl again with the --verbose flag, "
                       "copy its output and report this issue on "
                       "https://github.com/mikf/gallery-dl/issues ."),
                      exc.__class__.__name__, exc)
            log.debug("Traceback", exc_info=True)
        self.handle_finalize()

    def dispatch(self, msg):
        """Call the appropriate message handler"""
        if msg[0] == Message.Url:
            _, url, kwds = msg
            if self.pred_url(url, kwds):
                self.update_kwdict(kwds)
                self.handle_url(url, kwds)

        elif msg[0] == Message.Directory:
            self.update_kwdict(msg[1])
            self.handle_directory(msg[1])

        elif msg[0] == Message.Queue:
            _, url, kwds = msg
            if self.pred_queue(url, kwds):
                self.handle_queue(url, kwds)

        elif msg[0] == Message.Urllist:
            _, urls, kwds = msg
            if self.pred_url(urls[0], kwds):
                self.update_kwdict(kwds)
                self.handle_urllist(urls, kwds)

        elif msg[0] == Message.Version:
            if msg[1] != 1:
                # raising a plain string is invalid in Python 3;
                # use a real exception type instead
                raise RuntimeError(
                    "unsupported message-version ({}, {})".format(
                        self.extractor.category, msg[1]))
            # TODO: support for multiple message versions
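
    # message layouts, as unpacked above:
    #   (Message.Version, 1)
    #   (Message.Directory, {metadata})
    #   (Message.Url, url, {metadata})
    #   (Message.Urllist, [url, fallback-url, ...], {metadata})
    #   (Message.Queue, url, {metadata})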

    def handle_url(self, url, keywords):
        """Handle Message.Url"""

    def handle_urllist(self, urls, keywords):
        """Handle Message.Urllist"""
        self.handle_url(urls[0], keywords)

    def handle_directory(self, keywords):
        """Handle Message.Directory"""

    def handle_queue(self, url, keywords):
        """Handle Message.Queue"""

    def handle_finalize(self):
        """Handle job finalization"""

    def update_kwdict(self, kwdict):
        """Update 'kwdict' with additional metadata"""
        kwdict["category"] = self.extractor.category
        kwdict["subcategory"] = self.extractor.subcategory
        if self.userkwds:
            kwdict.update(self.userkwds)

    def _prepare_predicates(self, target, predicates, skip=True):
        pfilter = self.extractor.config(target + "-filter")
        if pfilter:
            try:
                pred = util.FilterPredicate(pfilter, target)
            except (SyntaxError, ValueError, TypeError) as exc:
                self.extractor.log.warning(exc)
            else:
                predicates.append(pred)

        prange = self.extractor.config(target + "-range")
        if prange:
            try:
                pred = util.RangePredicate(prange)
            except ValueError as exc:
                self.extractor.log.warning(
                    "invalid %s range: %s", target, exc)
            else:
                if skip and pred.lower > 1 and not pfilter:
                    pred.index += self.extractor.skip(pred.lower - 1)
                predicates.append(pred)

        return util.build_predicate(predicates)
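
    # 'target' is "image" or "chapter" (see __init__ above), so the option
    # keys read here are "image-filter"/"image-range" and
    # "chapter-filter"/"chapter-range"; illustrative (assumed) values:
    #   "image-range": "10-20", "image-filter": "width >= 1000"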

    def _write_unsupported(self, url):
        if self.ulog:
            self.ulog.info(url)


class DownloadJob(Job):
    """Download images into appropriate directory/filename locations"""

    def __init__(self, url, parent=None):
        Job.__init__(self, url, parent)
        self.log = logging.getLogger("download")
        self.pathfmt = None
        self.archive = None
        self.sleep = None
        self.downloaders = {}
        self.postprocessors = None
        self.out = output.select()

    def handle_url(self, url, keywords, fallback=None):
        """Download the resource specified in 'url'"""
        # prepare download
        self.pathfmt.set_keywords(keywords)

        if self.postprocessors:
            for pp in self.postprocessors:
                pp.prepare(self.pathfmt)

        if self.pathfmt.exists(self.archive):
            self.handle_skip()
            return

        if self.sleep:
            time.sleep(self.sleep)

        # download from URL
        if not self.download(url):

            # use fallback URLs if available
            for num, url in enumerate(fallback or (), 1):
                self.log.info("Trying fallback URL #%d", num)
                if self.download(url):
                    break
            else:
                # download failed
                self.log.error(
                    "Failed to download %s", self.pathfmt.filename or url)
                return

        if not self.pathfmt.temppath:
            self.handle_skip()
            return

        # run post processors
        if self.postprocessors:
            for pp in self.postprocessors:
                pp.run(self.pathfmt)

        # download succeeded
        self.pathfmt.finalize()
        self.out.success(self.pathfmt.path, 0)
        if self.archive:
            self.archive.add(keywords)
        self._skipcnt = 0
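        # a successful download resets the consecutive-skip counter that
        # handle_skip() below compares against the "skip" option's limit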

    def handle_urllist(self, urls, keywords):
        """Download the first available resource in 'urls'"""
        fallback = iter(urls)
        url = next(fallback)
        self.handle_url(url, keywords, fallback)
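
    # the iterator's remaining URLs serve as fallbacks, letting handle_url()
    # retry alternative sources without materializing the whole list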

    def handle_directory(self, keywords):
        """Set and create the target directory for downloads"""
        if not self.pathfmt:
            self.initialize(keywords)
        else:
            self.pathfmt.set_directory(keywords)

    def handle_queue(self, url, keywords):
        try:
            self.__class__(url, self).run()
        except exception.NoExtractorError:
            self._write_unsupported(url)

    def handle_finalize(self):
        if self.postprocessors:
            for pp in self.postprocessors:
                pp.finalize()

    def handle_skip(self):
        self.out.skip(self.pathfmt.path)
        if self._skipexc:
            self._skipcnt += 1
            if self._skipcnt >= self._skipmax:
                raise self._skipexc()

    def download(self, url):
        """Download 'url'"""
        scheme = url.partition(":")[0]
        downloader = self.get_downloader(scheme)
        if downloader:
            return downloader.download(url, self.pathfmt)
        self._write_unsupported(url)
        return False

    def get_downloader(self, scheme):
        """Return a downloader suitable for 'scheme'"""
        if scheme == "https":
            scheme = "http"
        try:
            return self.downloaders[scheme]
        except KeyError:
            pass

        klass = downloader.find(scheme)
        if klass and config.get(("downloader", scheme, "enabled"), True):
            instance = klass(self.extractor, self.out)
        else:
            instance = None
            self.log.error("'%s:' URLs are not supported/enabled", scheme)
        self.downloaders[scheme] = instance
        return instance
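
    # downloader instances are cached in self.downloaders per scheme, with
    # "https" folded into "http", so at most one instance exists per
    # protocol; a failed lookup is cached as None so the error is only
    # logged once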

    def initialize(self, keywords=None):
        """Delayed initialization of PathFormat, etc."""
        self.pathfmt = util.PathFormat(self.extractor)
        if keywords:
            self.pathfmt.set_directory(keywords)
        self.sleep = self.extractor.config("sleep")

        skip = self.extractor.config("skip", True)
        if skip:
            self._skipexc = None
            if isinstance(skip, str):
                skip, _, smax = skip.partition(":")
                if skip == "abort":
                    self._skipexc = exception.StopExtraction
                elif skip == "exit":
                    self._skipexc = sys.exit
                self._skipcnt = 0
                self._skipmax = text.parse_int(smax)
        else:
            self.pathfmt.exists = lambda x=None: False

        archive = self.extractor.config("archive")
        if archive:
            path = util.expand_path(archive)
            self.archive = util.DownloadArchive(path, self.extractor)

        postprocessors = self.extractor.config("postprocessors")
        if postprocessors:
            self.postprocessors = []
            for pp_dict in postprocessors:
                whitelist = pp_dict.get("whitelist")
                blacklist = pp_dict.get("blacklist")
                if (whitelist and self.extractor.category not in whitelist or
                        blacklist and self.extractor.category in blacklist):
                    continue
                name = pp_dict.get("name")
                pp_cls = postprocessor.find(name)
                if not pp_cls:
                    postprocessor.log.warning("module '%s' not found", name)
                    continue
                try:
                    pp_obj = pp_cls(self.pathfmt, pp_dict)
                except Exception as exc:
                    postprocessor.log.error(
                        "%s: initialization failed: %s %s",
                        name, exc.__class__.__name__, exc)
                else:
                    self.postprocessors.append(pp_obj)
            self.extractor.log.debug(
                "Active postprocessor modules: %s", self.postprocessors)


class SimulationJob(DownloadJob):
    """Simulate the extraction process without downloading anything"""

    def handle_url(self, url, keywords, fallback=None):
        self.pathfmt.set_keywords(keywords)
        self.out.skip(self.pathfmt.path)
        if self.sleep:
            time.sleep(self.sleep)
        if self.archive:
            self.archive.add(keywords)

    def handle_directory(self, keywords):
        if not self.pathfmt:
            self.initialize()


class KeywordJob(Job):
    """Print available keywords"""

    def handle_url(self, url, keywords):
        print("\nKeywords for filenames and --filter:")
        print("------------------------------------")
        self.print_keywords(keywords)
        raise exception.StopExtraction()

    def handle_directory(self, keywords):
        print("Keywords for directory names:")
        print("-----------------------------")
        self.print_keywords(keywords)

    def handle_queue(self, url, keywords):
        if not keywords:
            self.extractor.log.info(
                "This extractor delegates work to other extractors "
                "and does not provide any keywords on its own. Try "
                "'gallery-dl -K \"%s\"' instead.", url)
        else:
            print("Keywords for --chapter-filter:")
            print("------------------------------")
            self.print_keywords(keywords)
            if self.extractor.categorytransfer:
                print()
                KeywordJob(url, self).run()
        raise exception.StopExtraction()

    @staticmethod
    def print_keywords(keywords, prefix=""):
        """Print key-value pairs with formatting"""
        suffix = "]" if prefix else ""
        for key, value in sorted(keywords.items()):
            key = prefix + key + suffix

            if isinstance(value, dict):
                KeywordJob.print_keywords(value, key + "[")

            elif isinstance(value, list):
                if value and isinstance(value[0], dict):
                    KeywordJob.print_keywords(value[0], key + "[][")
                else:
                    print(key, "[]", sep="")
                    for val in value:
                        print(" -", val)

            else:
                # string or number
                print(key, "\n ", value, sep="")
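
    # example with hypothetical metadata {"user": {"id": 321}, "tags": ["a"]}:
    #   tags[]
    #    - a
    #   user[id]
    #    321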


class UrlJob(Job):
    """Print download urls"""
    maxdepth = 1

    def __init__(self, url, parent=None, depth=1):
        Job.__init__(self, url, parent)
        self.depth = depth
        if depth >= self.maxdepth:
            self.handle_queue = self.handle_url
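        # at maximum depth, Message.Queue URLs are printed by handle_url()
        # instead of being followed into a child UrlJob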

    @staticmethod
    def handle_url(url, _):
        print(url)

    @staticmethod
    def handle_urllist(urls, _):
        prefix = ""
        for url in urls:
            print(prefix, url, sep="")
            prefix = "| "

    def handle_queue(self, url, _):
        try:
            UrlJob(url, self, self.depth + 1).run()
        except exception.NoExtractorError:
            self._write_unsupported(url)


class TestJob(DownloadJob):
    """Generate test-results for extractor runs"""

    class HashIO():
        """Minimal file-like interface"""

        def __init__(self, hashobj):
            self.hashobj = hashobj
            self.path = ""
            self.size = 0
            self.has_extension = True

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def open(self, mode):
            self.size = 0
            return self

        def write(self, content):
            """Update SHA1 hash"""
            self.size += len(content)
            self.hashobj.update(content)

        def tell(self):
            return self.size

        def part_size(self):
            return 0
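
    # HashIO mimics just enough of a file/PathFormat interface for a
    # downloader to write into, so content is hashed rather than saved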

    def __init__(self, url, parent=None, content=False):
        DownloadJob.__init__(self, url, parent)
        self.content = content
        self.list_url = []
        self.list_keyword = []
        self.list_archive = []
        self.hash_url = hashlib.sha1()
        self.hash_keyword = hashlib.sha1()
        self.hash_archive = hashlib.sha1()
        self.hash_content = hashlib.sha1()
        if content:
            self.fileobj = self.HashIO(self.hash_content)
            self.get_downloader("http")._check_extension = lambda a, b: None

    def run(self):
        for msg in self.extractor:
            self.dispatch(msg)

    def handle_url(self, url, keywords):
        self.update_url(url)
        self.update_keyword(keywords)
        self.update_archive(keywords)
        self.update_content(url)

    def handle_urllist(self, urls, keywords):
        self.handle_url(urls[0], keywords)

    def handle_directory(self, keywords):
        self.update_keyword(keywords, False)

    def handle_queue(self, url, keywords):
        self.update_url(url)
        self.update_keyword(keywords)

    def update_url(self, url):
        """Update the URL hash"""
        self.list_url.append(url)
        self.hash_url.update(url.encode())

    def update_keyword(self, kwdict, to_list=True):
        """Update the keyword hash"""
        if to_list:
            self.list_keyword.append(kwdict.copy())
        self.hash_keyword.update(
            json.dumps(kwdict, sort_keys=True).encode())

    def update_archive(self, kwdict):
        """Update the archive-id hash"""
        archive_id = self.extractor.archive_fmt.format_map(kwdict)
        self.list_archive.append(archive_id)
        self.hash_archive.update(archive_id.encode())
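
    # archive_fmt is the extractor's format string mapping a metadata dict
    # to the unique ID a DownloadArchive entry would use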

    def update_content(self, url):
        """Update the content hash"""
        if self.content:
            scheme = url.partition(":")[0]
            self.get_downloader(scheme).download(url, self.fileobj)


class DataJob(Job):
    """Collect extractor results and dump them"""

    def __init__(self, url, parent=None, file=sys.stdout, ensure_ascii=True):
        Job.__init__(self, url, parent)
        self.file = file
        self.data = []
        self.ascii = config.get(("output", "ascii"), ensure_ascii)

    def run(self):
        # collect data
        try:
            for msg in self.extractor:
                self.dispatch(msg)
        except exception.StopExtraction:
            pass
        except Exception as exc:
            self.data.append((exc.__class__.__name__, str(exc)))
        except BaseException:
            pass

        if config.get(("output", "num-to-str"), False):
            for msg in self.data:
                util.transform_dict(msg[-1], util.number_to_string)

        # dump to 'file'
        json.dump(
            self.data, self.file,
            sort_keys=True, indent=2, ensure_ascii=self.ascii,
        )
        self.file.write("\n")

    def handle_url(self, url, keywords):
        self.data.append((Message.Url, url, keywords.copy()))

    def handle_urllist(self, urls, keywords):
        self.data.append((Message.Urllist, list(urls), keywords.copy()))

    def handle_directory(self, keywords):
        self.data.append((Message.Directory, keywords.copy()))

    def handle_queue(self, url, keywords):
        self.data.append((Message.Queue, url, keywords.copy()))

    def handle_finalize(self):
        self.file.close()