# -*- coding: utf-8 -*-
# Copyright 2015-2020 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

import sys
import time
import errno
import logging
from . import extractor, downloader, postprocessor
from . import config, text, util, output, exception
from .extractor.message import Message
class Job():
"""Base class for Job-types"""
ulog = None
def __init__(self, extr, parent=None):
if isinstance(extr, str):
extr = extractor.find(extr)
if not extr:
raise exception.NoExtractorError()
self.extractor = extr
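        # expose the extractor and this job on its logger so that
        # contextual information is available to log output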
extr.log.extractor = extr
extr.log.job = self
extr.log.debug("Using %s for '%s'", extr.__class__.__name__, extr.url)
self.status = 0
self.pred_url = self._prepare_predicates("image", True)
self.pred_queue = self._prepare_predicates("chapter", False)
if parent and parent.extractor.config(
"category-transfer", parent.extractor.categorytransfer):
self.extractor.category = parent.extractor.category
self.extractor.subcategory = parent.extractor.subcategory
# user-supplied metadata
self.userkwds = self.extractor.config("keywords")
def run(self):
"""Execute or run the job"""
try:
log = self.extractor.log
for msg in self.extractor:
self.dispatch(msg)
except exception.StopExtraction as exc:
if exc.message:
log.error(exc.message)
self.status |= exc.code
except exception.GalleryDLException as exc:
log.error("%s: %s", exc.__class__.__name__, exc)
self.status |= exc.code
except OSError as exc:
log.error("Unable to download data: %s: %s",
exc.__class__.__name__, exc)
log.debug("", exc_info=True)
self.status |= 128
except Exception as exc:
log.error(("An unexpected error occurred: %s - %s. "
"Please run gallery-dl again with the --verbose flag, "
"copy its output and report this issue on "
"https://github.com/mikf/gallery-dl/issues ."),
exc.__class__.__name__, exc)
log.debug("", exc_info=True)
self.status |= 1
except BaseException:
self.status |= 1
raise
finally:
self.handle_finalize()
return self.status
def dispatch(self, msg):
"""Call the appropriate message handler"""
if msg[0] == Message.Url:
_, url, kwds = msg
if self.pred_url(url, kwds):
self.update_kwdict(kwds)
self.handle_url(url, kwds)
elif msg[0] == Message.Directory:
self.update_kwdict(msg[1])
self.handle_directory(msg[1])
elif msg[0] == Message.Queue:
_, url, kwds = msg
if self.pred_queue(url, kwds):
self.handle_queue(url, kwds)
elif msg[0] == Message.Urllist:
_, urls, kwds = msg
if self.pred_url(urls[0], kwds):
self.update_kwdict(kwds)
self.handle_urllist(urls, kwds)
elif msg[0] == Message.Metadata:
self.update_kwdict(msg[1])
self.handle_metadata(msg[1])
elif msg[0] == Message.Version:
if msg[1] != 1:
raise "unsupported message-version ({}, {})".format(
self.extractor.category, msg[1]
)
# TODO: support for multiple message versions
def handle_url(self, url, kwdict):
"""Handle Message.Url"""
def handle_urllist(self, urls, kwdict):
"""Handle Message.Urllist"""
self.handle_url(urls[0], kwdict)
def handle_directory(self, kwdict):
"""Handle Message.Directory"""
def handle_metadata(self, kwdict):
"""Handle Message.Metadata"""
def handle_queue(self, url, kwdict):
"""Handle Message.Queue"""
def handle_finalize(self):
"""Handle job finalization"""
def update_kwdict(self, kwdict):
"""Update 'kwdict' with additional metadata"""
extr = self.extractor
kwdict["category"] = extr.category
kwdict["subcategory"] = extr.subcategory
if self.userkwds:
kwdict.update(self.userkwds)
def _prepare_predicates(self, target, skip=True):
predicates = []
if self.extractor.config(target + "-unique"):
predicates.append(util.UniquePredicate())
pfilter = self.extractor.config(target + "-filter")
if pfilter:
try:
pred = util.FilterPredicate(pfilter, target)
except (SyntaxError, ValueError, TypeError) as exc:
self.extractor.log.warning(exc)
else:
predicates.append(pred)
prange = self.extractor.config(target + "-range")
if prange:
try:
pred = util.RangePredicate(prange)
except ValueError as exc:
self.extractor.log.warning(
"invalid %s range: %s", target, exc)
else:
if skip and pred.lower > 1 and not pfilter:
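                    # let the extractor skip ahead to the range's lower bound
                    # and advance the predicate's index accordingly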
pred.index += self.extractor.skip(pred.lower - 1)
predicates.append(pred)
return util.build_predicate(predicates)
def _write_unsupported(self, url):
if self.ulog:
self.ulog.info(url)
class DownloadJob(Job):
"""Download images into appropriate directory/filename locations"""
def __init__(self, url, parent=None):
Job.__init__(self, url, parent)
self.log = logging.getLogger("download")
self.pathfmt = None
self.archive = None
self.sleep = None
self.downloaders = {}
self.postprocessors = None
self.out = output.select()
self.visited = parent.visited if parent else set()
def handle_url(self, url, kwdict, fallback=None):
"""Download the resource specified in 'url'"""
postprocessors = self.postprocessors
pathfmt = self.pathfmt
archive = self.archive
# prepare download
pathfmt.set_filename(kwdict)
if postprocessors:
for pp in postprocessors:
pp.prepare(pathfmt)
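        # skip if the target file already exists on disk or in the archive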
if pathfmt.exists(archive):
self.handle_skip()
return
if self.sleep:
time.sleep(self.sleep)
# download from URL
if not self.download(url):
# use fallback URLs if available
for num, url in enumerate(fallback or (), 1):
util.remove_file(self.pathfmt.temppath)
self.log.info("Trying fallback URL #%d", num)
if self.download(url):
break
else:
# download failed
self.status |= 4
self.log.error("Failed to download %s",
pathfmt.filename or url)
return
if not pathfmt.temppath:
self.handle_skip()
return
# run post processors
if postprocessors:
for pp in postprocessors:
pp.run(pathfmt)
# download succeeded
pathfmt.finalize()
self.out.success(pathfmt.path, 0)
if archive:
archive.add(kwdict)
if postprocessors:
for pp in postprocessors:
pp.run_after(pathfmt)
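        # reset the counter of consecutively skipped files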
self._skipcnt = 0
def handle_urllist(self, urls, kwdict):
"""Download the resource specified in 'url'"""
fallback = iter(urls)
url = next(fallback)
self.handle_url(url, kwdict, fallback)
def handle_directory(self, kwdict):
"""Set and create the target directory for downloads"""
if not self.pathfmt:
self.initialize(kwdict)
else:
self.pathfmt.set_directory(kwdict)
def handle_metadata(self, kwdict):
"""Run postprocessors with metadata from 'kwdict'"""
postprocessors = self.postprocessors
if postprocessors:
pathfmt = self.pathfmt
pathfmt.set_filename(kwdict)
for pp in postprocessors:
pp.run_metadata(pathfmt)
def handle_queue(self, url, kwdict):
if url in self.visited:
return
self.visited.add(url)
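        # prefer an extractor class suggested by the parent extractor,
        # otherwise pick one by matching the URL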
if "_extractor" in kwdict:
extr = kwdict["_extractor"].from_url(url)
else:
extr = extractor.find(url)
if extr:
self.status |= self.__class__(extr, self).run()
else:
self._write_unsupported(url)
def handle_finalize(self):
pathfmt = self.pathfmt
if self.archive:
self.archive.close()
if pathfmt:
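            # save session cookies (e.g. back to a cookies file) if configured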
self.extractor._store_cookies()
if self.postprocessors:
status = self.status
for pp in self.postprocessors:
pp.run_final(pathfmt, status)
def handle_skip(self):
self.out.skip(self.pathfmt.path)
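        # with skip = "abort:N" or "exit:N", stop after N consecutive skips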
if self._skipexc:
self._skipcnt += 1
if self._skipcnt >= self._skipmax:
raise self._skipexc()
def download(self, url):
"""Download 'url'"""
scheme = url.partition(":")[0]
downloader = self.get_downloader(scheme)
if downloader:
try:
return downloader.download(url, self.pathfmt)
except OSError as exc:
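                # a full disk is fatal; any other OSError only fails this file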
if exc.errno == errno.ENOSPC:
raise
self.log.warning("%s: %s", exc.__class__.__name__, exc)
return False
self._write_unsupported(url)
return False
def get_downloader(self, scheme):
"""Return a downloader suitable for 'scheme'"""
try:
return self.downloaders[scheme]
except KeyError:
pass
cls = downloader.find(scheme)
if cls and config.get(("downloader", cls.scheme), "enabled", True):
instance = cls(self.extractor, self.out)
else:
instance = None
self.log.error("'%s:' URLs are not supported/enabled", scheme)
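        # cache the downloader instance; http and https share the same one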
if cls and cls.scheme == "http":
self.downloaders["http"] = self.downloaders["https"] = instance
else:
self.downloaders[scheme] = instance
return instance
def initialize(self, kwdict=None):
"""Delayed initialization of PathFormat, etc."""
self.pathfmt = util.PathFormat(self.extractor)
if kwdict:
self.pathfmt.set_directory(kwdict)
self.sleep = self.extractor.config("sleep")
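        # if downloading is disabled, only adjust file extensions
        # instead of fetching any data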
if not self.extractor.config("download", True):
self.download = self.pathfmt.fix_extension
skip = self.extractor.config("skip", True)
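        # 'skip' controls how already-existing files are handled:
        # true/false, "enumerate", "abort[:N]", or "exit[:N]"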
if skip:
self._skipexc = None
if skip == "enumerate":
self.pathfmt.check_file = self.pathfmt._enum_file
elif isinstance(skip, str):
skip, _, smax = skip.partition(":")
if skip == "abort":
self._skipexc = exception.StopExtraction
elif skip == "exit":
self._skipexc = sys.exit
self._skipcnt = 0
self._skipmax = text.parse_int(smax)
else:
self.pathfmt.exists = lambda x=None: False
archive = self.extractor.config("archive")
if archive:
path = util.expand_path(archive)
try:
self.archive = util.DownloadArchive(path, self.extractor)
except Exception as exc:
self.extractor.log.warning(
"Failed to open download archive at '%s' ('%s: %s')",
path, exc.__class__.__name__, exc)
else:
self.extractor.log.debug("Using download archive '%s'", path)
postprocessors = self.extractor.config("postprocessors")
if postprocessors:
pp_list = []
for pp_dict in postprocessors:
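                # honor the post processor's whitelist/blacklist
                # of extractor categories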
whitelist = pp_dict.get("whitelist")
blacklist = pp_dict.get("blacklist")
if (whitelist and self.extractor.category not in whitelist or
blacklist and self.extractor.category in blacklist):
continue
name = pp_dict.get("name")
pp_cls = postprocessor.find(name)
if not pp_cls:
postprocessor.log.warning("module '%s' not found", name)
continue
try:
pp_obj = pp_cls(self.pathfmt, pp_dict)
except Exception as exc:
postprocessor.log.error(
"'%s' initialization failed: %s: %s",
name, exc.__class__.__name__, exc)
else:
pp_list.append(pp_obj)
if pp_list:
self.postprocessors = pp_list
self.extractor.log.debug(
"Active postprocessor modules: %s", pp_list)
class SimulationJob(DownloadJob):
"""Simulate the extraction process without downloading anything"""
def handle_url(self, url, kwdict, fallback=None):
self.pathfmt.set_filename(kwdict)
self.out.skip(self.pathfmt.path)
if self.sleep:
time.sleep(self.sleep)
if self.archive:
self.archive.add(kwdict)
def handle_directory(self, kwdict):
if not self.pathfmt:
self.initialize()
class KeywordJob(Job):
"""Print available keywords"""
def handle_url(self, url, kwdict):
print("\nKeywords for filenames and --filter:")
print("------------------------------------")
self.print_kwdict(kwdict)
raise exception.StopExtraction()
def handle_directory(self, kwdict):
print("Keywords for directory names:")
print("-----------------------------")
self.print_kwdict(kwdict)
def handle_queue(self, url, kwdict):
if not util.filter_dict(kwdict):
self.extractor.log.info(
"This extractor only spawns other extractors "
"and does not provide any metadata on its own.")
if "_extractor" in kwdict:
self.extractor.log.info(
"Showing results for '%s' instead:\n", url)
extr = kwdict["_extractor"].from_url(url)
KeywordJob(extr, self).run()
else:
self.extractor.log.info(
"Try 'gallery-dl -K \"%s\"' instead.", url)
else:
print("Keywords for --chapter-filter:")
print("------------------------------")
self.print_kwdict(kwdict)
if self.extractor.categorytransfer:
print()
KeywordJob(url, self).run()
raise exception.StopExtraction()
@staticmethod
def print_kwdict(kwdict, prefix=""):
"""Print key-value pairs in 'kwdict' with formatting"""
suffix = "]" if prefix else ""
for key, value in sorted(kwdict.items()):
if key[0] == "_":
continue
key = prefix + key + suffix
if isinstance(value, dict):
KeywordJob.print_kwdict(value, key + "[")
elif isinstance(value, list):
if value and isinstance(value[0], dict):
KeywordJob.print_kwdict(value[0], key + "[][")
else:
print(key, "[]", sep="")
for val in value:
print(" -", val)
else:
# string or number
print(key, "\n ", value, sep="")
class UrlJob(Job):
"""Print download urls"""
maxdepth = 1
def __init__(self, url, parent=None, depth=1):
Job.__init__(self, url, parent)
self.depth = depth
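        # at maximum depth, print queued URLs instead of recursing into them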
if depth >= self.maxdepth:
self.handle_queue = self.handle_url
@staticmethod
def handle_url(url, _):
print(url)
@staticmethod
def handle_urllist(urls, _):
prefix = ""
for url in urls:
print(prefix, url, sep="")
prefix = "| "
def handle_queue(self, url, _):
try:
UrlJob(url, self, self.depth + 1).run()
except exception.NoExtractorError:
self._write_unsupported(url)
class DataJob(Job):
"""Collect extractor results and dump them"""
def __init__(self, url, parent=None, file=sys.stdout, ensure_ascii=True):
Job.__init__(self, url, parent)
self.file = file
self.data = []
self.ascii = config.get(("output",), "ascii", ensure_ascii)
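        # unless "output.private" is set, strip metadata keys starting with '_'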
private = config.get(("output",), "private")
self.filter = (lambda x: x) if private else util.filter_dict
def run(self):
# collect data
try:
for msg in self.extractor:
self.dispatch(msg)
except exception.StopExtraction:
pass
except Exception as exc:
self.data.append((exc.__class__.__name__, str(exc)))
except BaseException:
pass
# convert numbers to string
if config.get(("output",), "num-to-str", False):
for msg in self.data:
util.transform_dict(msg[-1], util.number_to_string)
# dump to 'file'
util.dump_json(self.data, self.file, self.ascii, 2)
return 0
def handle_url(self, url, kwdict):
self.data.append((Message.Url, url, self.filter(kwdict)))
def handle_urllist(self, urls, kwdict):
self.data.append((Message.Urllist, list(urls), self.filter(kwdict)))
def handle_directory(self, kwdict):
self.data.append((Message.Directory, self.filter(kwdict)))
def handle_metadata(self, kwdict):
self.data.append((Message.Metadata, self.filter(kwdict)))
def handle_queue(self, url, kwdict):
self.data.append((Message.Queue, url, self.filter(kwdict)))
def handle_finalize(self):
self.file.close()