1
0
mirror of https://github.com/mikf/gallery-dl.git synced 2024-11-26 04:32:51 +01:00
gallery-dl/gallery_dl/job.py

560 lines
18 KiB
Python
Raw Normal View History

# -*- coding: utf-8 -*-
# Copyright 2015-2018 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
import sys
import time
2015-12-12 01:16:02 +01:00
import json
import hashlib
import logging
2018-05-20 22:03:57 +02:00
from . import extractor, downloader, postprocessor
from . import config, text, util, output, exception
2015-11-24 19:47:51 +01:00
from .extractor.message import Message
2017-01-30 19:40:15 +01:00
2015-12-12 00:11:05 +01:00
class Job():
    """Base class for Job-types"""
    # logger for unsupported URLs; set externally when those URLs
    # should be recorded (see _write_unsupported)
    ulog = None

    def __init__(self, url, parent=None):
        """Find a suitable extractor for 'url' and prepare predicates.

        Raises exception.NoExtractorError if no extractor accepts 'url'.
        """
        self.url = url
        self.extractor = extractor.find(url)
        if self.extractor is None:
            raise exception.NoExtractorError(url)
        self.extractor.log.debug(
            "Using %s for '%s'", self.extractor.__class__.__name__, url)

        # url predicates: applied to every Message.Url;
        # UniquePredicate silently drops duplicate URLs
        self.pred_url = self._prepare_predicates(
            "image", [util.UniquePredicate()], True)

        # queue predicates: applied to every Message.Queue
        self.pred_queue = self._prepare_predicates(
            "chapter", [], False)

        # category transfer: child jobs may inherit their parent's
        # (sub)category so config options carry over
        if parent and parent.extractor.categorytransfer:
            self.extractor.category = parent.extractor.category
            self.extractor.subcategory = parent.extractor.subcategory

        # user-supplied metadata, merged into every keyword dict
        self.userkwds = self.extractor.config("keywords")

    def run(self):
        """Execute or run the job"""
        try:
            log = self.extractor.log
            for msg in self.extractor:
                self.dispatch(msg)
        except exception.AuthenticationError as exc:
            msg = str(exc) or "Please provide a valid username/password pair."
            log.error("Authentication failed: %s", msg)
        except exception.AuthorizationError:
            log.error("You do not have permission to access the resource "
                      "at '%s'", self.url)
        except exception.NotFoundError as exc:
            res = str(exc) or "resource (gallery/image/user)"
            log.error("The %s at '%s' does not exist", res, self.url)
        except exception.HttpError as exc:
            log.error("HTTP request failed: %s", exc)
        except exception.FormatError as exc:
            err, obj = exc.args
            log.error("Applying %s format string failed: %s: %s",
                      obj, err.__class__.__name__, err)
        except exception.FilterError as exc:
            err = exc.args[0]
            log.error("Evaluating filter expression failed: %s: %s",
                      err.__class__.__name__, err)
        except exception.StopExtraction:
            # deliberate early termination; not an error
            pass
        except OSError as exc:
            log.error("Unable to download data: %s", exc)
        except Exception as exc:
            # unexpected errors are logged with a bug-report hint
            log.error(("An unexpected error occurred: %s - %s. "
                       "Please run gallery-dl again with the --verbose flag, "
                       "copy its output and report this issue on "
                       "https://github.com/mikf/gallery-dl/issues ."),
                      exc.__class__.__name__, exc)
            log.debug("Traceback", exc_info=True)
        self.handle_finalize()

    def dispatch(self, msg):
        """Call the appropriate message handler"""
        if msg[0] == Message.Url:
            _, url, kwds = msg
            if self.pred_url(url, kwds):
                self.update_kwdict(kwds)
                self.handle_url(url, kwds)

        elif msg[0] == Message.Directory:
            self.update_kwdict(msg[1])
            self.handle_directory(msg[1])

        elif msg[0] == Message.Queue:
            _, url, kwds = msg
            if self.pred_queue(url, kwds):
                self.handle_queue(url, kwds)

        elif msg[0] == Message.Urllist:
            _, urls, kwds = msg
            if self.pred_url(urls[0], kwds):
                self.update_kwdict(kwds)
                self.handle_urllist(urls, kwds)

        elif msg[0] == Message.Version:
            if msg[1] != 1:
                # fix: raising a plain string is a TypeError in Python 3
                # and loses the intended message; raise a real exception
                raise ValueError(
                    "unsupported message-version ({}, {})".format(
                        self.extractor.category, msg[1]))
            # TODO: support for multiple message versions

    def handle_url(self, url, keywords):
        """Handle Message.Url"""

    def handle_urllist(self, urls, keywords):
        """Handle Message.Urllist"""
        self.handle_url(urls[0], keywords)

    def handle_directory(self, keywords):
        """Handle Message.Directory"""

    def handle_queue(self, url, keywords):
        """Handle Message.Queue"""

    def handle_finalize(self):
        """Handle job finalization"""

    def update_kwdict(self, kwdict):
        """Update 'kwdict' with additional metadata"""
        kwdict["category"] = self.extractor.category
        kwdict["subcategory"] = self.extractor.subcategory
        if self.userkwds:
            kwdict.update(self.userkwds)

    def _prepare_predicates(self, target, predicates, skip=True):
        """Build the combined filter/range predicate for 'target'.

        'skip' allows fast-forwarding the extractor when a range
        starts past the first item and no filter is active.
        """
        pfilter = self.extractor.config(target + "-filter")
        if pfilter:
            try:
                pred = util.FilterPredicate(pfilter, target)
            except (SyntaxError, ValueError, TypeError) as exc:
                self.extractor.log.warning(exc)
            else:
                predicates.append(pred)

        prange = self.extractor.config(target + "-range")
        if prange:
            try:
                pred = util.RangePredicate(prange)
            except ValueError as exc:
                self.extractor.log.warning(
                    "invalid %s range: %s", target, exc)
            else:
                if skip and pred.lower > 1 and not pfilter:
                    pred.index += self.extractor.skip(pred.lower - 1)
                predicates.append(pred)

        return util.build_predicate(predicates)

    def _write_unsupported(self, url):
        # record unsupported URLs if a logger has been configured
        if self.ulog:
            self.ulog.info(url)


class DownloadJob(Job):
    """Download images into appropriate directory/filename locations"""

    def __init__(self, url, parent=None):
        Job.__init__(self, url, parent)
        self.log = logging.getLogger("download")
        self.pathfmt = None
        self.archive = None
        self.sleep = None
        self.downloaders = {}
        self.postprocessors = None
        self.out = output.select()
        # fix: safe defaults so handle_skip()/handle_url() never raise
        # AttributeError when initialize() takes a path that does not
        # assign these (e.g. a falsy 'skip' config value)
        self._skipexc = None
        self._skipcnt = 0
        self._skipmax = 0

    def handle_url(self, url, keywords, fallback=None):
        """Download the resource specified in 'url'"""
        # prepare download
        self.pathfmt.set_keywords(keywords)

        if self.pathfmt.exists(self.archive):
            self.handle_skip()
            return

        if self.sleep:
            time.sleep(self.sleep)

        # download from URL
        if not self.download(url):
            # use fallback URLs if available
            for num, url in enumerate(fallback or (), 1):
                self.log.info("Trying fallback URL #%d", num)
                if self.download(url):
                    break
            else:
                # download failed
                self.log.error(
                    "Failed to download %s", self.pathfmt.filename or url)
                return

        # an empty 'temppath' signals that the downloader skipped the file
        if not self.pathfmt.temppath:
            self.handle_skip()
            return

        # run post processors
        if self.postprocessors:
            for pp in self.postprocessors:
                pp.run(self.pathfmt)

        # download succeeded
        self.pathfmt.finalize()
        self.out.success(self.pathfmt.path, 0)
        if self.archive:
            self.archive.add(keywords)
        self._skipcnt = 0

    def handle_urllist(self, urls, keywords):
        """Download the resource specified in 'url'"""
        # first URL is the primary one; the rest serve as fallbacks
        fallback = iter(urls)
        url = next(fallback)
        self.handle_url(url, keywords, fallback)

    def handle_directory(self, keywords):
        """Set and create the target directory for downloads"""
        if not self.pathfmt:
            self.initialize()
        self.pathfmt.set_directory(keywords)

    def handle_queue(self, url, keywords):
        """Run a child job of the same type for a queued URL"""
        try:
            self.__class__(url, self).run()
        except exception.NoExtractorError:
            self._write_unsupported(url)

    def handle_finalize(self):
        """Let all post processors finish up"""
        if self.postprocessors:
            for pp in self.postprocessors:
                pp.finalize()

    def handle_skip(self):
        """Report a skipped file and enforce the 'skip' abort/exit limit"""
        self.out.skip(self.pathfmt.path)
        if self._skipexc:
            self._skipcnt += 1
            if self._skipcnt >= self._skipmax:
                raise self._skipexc()

    def download(self, url):
        """Download 'url'"""
        scheme = url.partition(":")[0]
        downloader = self.get_downloader(scheme)
        if downloader:
            return downloader.download(url, self.pathfmt)
        return False

    def get_downloader(self, scheme):
        """Return a downloader suitable for 'scheme'"""
        if scheme == "https":
            scheme = "http"
        try:
            return self.downloaders[scheme]
        except KeyError:
            pass

        klass = downloader.find(scheme)
        if klass:
            instance = klass(self.extractor, self.out)
        else:
            instance = None
            self.log.error("'%s:' URLs are not supported", scheme)
        # cache the result, including failed lookups (None)
        self.downloaders[scheme] = instance
        return instance

    def initialize(self):
        """Delayed initialization of PathFormat, etc."""
        self.pathfmt = util.PathFormat(self.extractor)
        self.sleep = self.extractor.config("sleep")

        skip = self.extractor.config("skip", True)
        if skip:
            self._skipexc = None
            if isinstance(skip, str):
                # e.g. "abort:5" -> stop extraction after 5 skips
                skip, _, smax = skip.partition(":")
                if skip == "abort":
                    self._skipexc = exception.StopExtraction
                elif skip == "exit":
                    self._skipexc = sys.exit
                self._skipcnt = 0
                self._skipmax = text.parse_int(smax)
        else:
            # never skip: pretend no file ever exists
            self.pathfmt.exists = lambda x=None: False

        archive = self.extractor.config("archive")
        if archive:
            path = util.expand_path(archive)
            self.archive = util.DownloadArchive(path, self.extractor)

        postprocessors = self.extractor.config("postprocessors")
        if postprocessors:
            self.postprocessors = []
            for pp_dict in postprocessors:
                # honor per-module category white-/blacklists
                whitelist = pp_dict.get("whitelist")
                blacklist = pp_dict.get("blacklist")
                if (whitelist and self.extractor.category not in whitelist or
                        blacklist and self.extractor.category in blacklist):
                    continue
                name = pp_dict.get("name")
                pp_cls = postprocessor.find(name)
                if not pp_cls:
                    postprocessor.log.warning("module '%s' not found", name)
                    continue
                try:
                    pp_obj = pp_cls(self.pathfmt, pp_dict)
                except Exception as exc:
                    postprocessor.log.error(
                        "%s: initialization failed: %s %s",
                        name, exc.__class__.__name__, exc)
                else:
                    self.postprocessors.append(pp_obj)
            self.extractor.log.debug(
                "Active postprocessor modules: %s", self.postprocessors)


class SimulationJob(DownloadJob):
    """Simulate the extraction process without downloading anything"""

    def handle_url(self, url, keywords, fallback=None):
        """Report 'url' as skipped instead of downloading it."""
        pathfmt = self.pathfmt
        pathfmt.set_keywords(keywords)
        self.out.skip(pathfmt.path)
        if self.sleep:
            time.sleep(self.sleep)
        if self.archive:
            self.archive.add(keywords)

    def handle_directory(self, keywords):
        """Perform delayed initialization on the first directory message."""
        if self.pathfmt is None:
            self.initialize()


class KeywordJob(Job):
    """Print available keywords"""

    def handle_url(self, url, keywords):
        """Show filename-level keywords, then stop the extractor run."""
        print("\nKeywords for filenames and --filter:")
        print("------------------------------------")
        self.print_keywords(keywords)
        raise exception.StopExtraction()

    def handle_directory(self, keywords):
        """Show directory-level keywords."""
        print("Keywords for directory names:")
        print("-----------------------------")
        self.print_keywords(keywords)

    def handle_queue(self, url, keywords):
        """Show queue-level keywords or point the user at a child job."""
        if keywords:
            print("Keywords for --chapter-filter:")
            print("------------------------------")
            self.print_keywords(keywords)
            if self.extractor.categorytransfer:
                print()
                KeywordJob(url, self).run()
        else:
            self.extractor.log.info(
                "This extractor delegates work to other extractors "
                "and does not provide any keywords on its own. Try "
                "'gallery-dl -K \"%s\"' instead.", url)
        raise exception.StopExtraction()

    @staticmethod
    def print_keywords(keywords, prefix=""):
        """Print key-value pairs with formatting"""
        suffix = "]" if prefix else ""
        for name in sorted(keywords):
            value = keywords[name]
            name = prefix + name + suffix

            if isinstance(value, dict):
                # nested mapping: recurse with 'name[' as the new prefix
                KeywordJob.print_keywords(value, name + "[")
            elif isinstance(value, list):
                if value and isinstance(value[0], dict):
                    # list of mappings: show the keys of the first element
                    KeywordJob.print_keywords(value[0], name + "[][")
                else:
                    # plain list: one line per element
                    print(name, "[]", sep="")
                    for item in value:
                        print(" -", item)
            else:
                # string or number
                print(name, "\n ", value, sep="")


class UrlJob(Job):
    """Print download urls"""
    maxdepth = 1

    def __init__(self, url, parent=None, depth=1):
        Job.__init__(self, url, parent)
        self.depth = depth
        # once the maximum depth is reached, queued URLs are printed
        # directly instead of being followed by another child job
        if depth >= self.maxdepth:
            self.handle_queue = self.handle_url

    @staticmethod
    def handle_url(url, _):
        print(url)

    @staticmethod
    def handle_urllist(urls, _):
        """Print the primary URL, then each fallback prefixed with '| '."""
        for index, url in enumerate(urls):
            print("| " if index else "", url, sep="")

    def handle_queue(self, url, _):
        """Follow a queued URL with a child UrlJob one level deeper."""
        try:
            UrlJob(url, self, self.depth + 1).run()
        except exception.NoExtractorError:
            self._write_unsupported(url)


class TestJob(DownloadJob):
    """Generate test-results for extractor runs"""

    class HashIO():
        """Minimal file-like interface"""
        # stands in for a real file object so downloaders can "write"
        # into a hash instead of onto disk

        def __init__(self, hashobj):
            self.hashobj = hashobj
            self.path = ""
            self.size = 0
            self.has_extension = True

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def open(self, mode):
            # reset the byte counter for each new "file"
            self.size = 0
            return self

        def write(self, content):
            """Update SHA1 hash"""
            self.size += len(content)
            self.hashobj.update(content)

        def tell(self):
            return self.size

        def part_size(self):
            return 0

    def __init__(self, url, parent=None, content=False):
        """Set up hash objects and result lists.

        'content' enables hashing of the actual downloaded file data.
        """
        DownloadJob.__init__(self, url, parent)
        self.content = content

        # raw values, kept for inspection alongside the hashes
        self.list_url = []
        self.list_keyword = []
        self.list_archive = []

        self.hash_url = hashlib.sha1()
        self.hash_keyword = hashlib.sha1()
        self.hash_archive = hashlib.sha1()
        self.hash_content = hashlib.sha1()
        if content:
            self.fileobj = self.HashIO(self.hash_content)
            # disable the HTTP downloader's extension check so data is
            # written through HashIO regardless of file type
            self.get_downloader("http")._check_extension = lambda a, b: None

    def run(self):
        # bypass Job.run()'s exception handling so test failures surface
        for msg in self.extractor:
            self.dispatch(msg)

    def handle_url(self, url, keywords):
        self.update_url(url)
        self.update_keyword(keywords)
        self.update_archive(keywords)
        self.update_content(url)

    def handle_urllist(self, urls, keywords):
        # only the primary URL is relevant for test results
        self.handle_url(urls[0], keywords)

    def handle_directory(self, keywords):
        # directory keywords are hashed but not stored in list_keyword
        self.update_keyword(keywords, False)

    def handle_queue(self, url, keywords):
        self.update_url(url)
        self.update_keyword(keywords)

    def update_url(self, url):
        """Update the URL hash"""
        self.list_url.append(url)
        self.hash_url.update(url.encode())

    def update_keyword(self, kwdict, to_list=True):
        """Update the keyword hash"""
        if to_list:
            self.list_keyword.append(kwdict.copy())
        # sort_keys makes the serialization (and thus the hash) stable
        self.hash_keyword.update(
            json.dumps(kwdict, sort_keys=True).encode())

    def update_archive(self, kwdict):
        """Update the archive-id hash"""
        archive_id = self.extractor.archive_fmt.format_map(kwdict)
        self.list_archive.append(archive_id)
        self.hash_archive.update(archive_id.encode())

    def update_content(self, url):
        """Update the content hash"""
        if self.content:
            scheme = url.partition(":")[0]
            self.get_downloader(scheme).download(url, self.fileobj)


class DataJob(Job):
    """Collect extractor results and dump them"""

    def __init__(self, url, parent=None, file=sys.stdout):
        Job.__init__(self, url, parent)
        self.file = file
        self.data = []

    def run(self):
        """Collect every extractor message and write them out as JSON."""
        # gather messages; on error, store (exception name, message) instead
        try:
            for message in self.extractor:
                self.dispatch(message)
        except Exception as exc:
            self.data.append((exc.__class__.__name__, str(exc)))
        except BaseException:
            pass

        # optionally convert numeric metadata values to strings
        if config.get(("output", "num-to-str"), False):
            for entry in self.data:
                util.transform_dict(entry[-1], util.number_to_string)

        # serialize everything collected so far to 'self.file'
        json.dump(
            self.data, self.file,
            sort_keys=True, indent=2,
            ensure_ascii=config.get(("output", "ascii"), True),
        )
        self.file.write("\n")

    def _record(self, *entry):
        # store one message as an immutable tuple
        self.data.append(entry)

    def handle_url(self, url, keywords):
        self._record(Message.Url, url, keywords.copy())

    def handle_urllist(self, urls, keywords):
        self._record(Message.Urllist, list(urls), keywords.copy())

    def handle_directory(self, keywords):
        self._record(Message.Directory, keywords.copy())

    def handle_queue(self, url, keywords):
        self._record(Message.Queue, url, keywords.copy())

    def handle_finalize(self):
        self.file.close()