# -*- coding: utf-8 -*-

# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Base classes for extractors for danbooru and co"""
from .common import Extractor, Message
from .. import text
import xml.etree.ElementTree as ET
import json
import urllib.parse
class BooruExtractor(Extractor):
    """Base class for extractors for danbooru and co.

    Concrete subclasses set 'api_url' and implement 'items_impl',
    which yields one metadata-dict per image.
    """

    # extractor information; overridden by concrete subclasses
    info = {}
    # url of the site's listing api; also used to resolve relative file-urls
    api_url = ""

    def __init__(self):
        Extractor.__init__(self)
        # name of the query parameter used for pagination
        self.page = "page"
        # default query parameters; 'limit' is the page size and is used
        # by subclasses to detect the final page of results
        self.params = {"limit": 50}
        # http headers to send along with requests for this site
        self.headers = {}

    def items(self):
        """Yield the extractor messages for this job"""
        yield Message.Version, 1
        yield Message.Directory, self.get_job_metadata()
        yield Message.Headers, self.headers
        for data in self.items_impl():
            try:
                yield (Message.Url, self.get_file_url(data),
                       self.get_file_metadata(data))
            except KeyError:
                # entry without a 'file_url' (e.g. a deleted post) - skip it
                continue

    def items_impl(self):
        """Yield one metadata-dict per image; implemented by subclasses"""
        # the original stub returned None, which made 'items()' fail with
        # an opaque TypeError; raise an explicit error instead
        raise NotImplementedError()

    def update_page(self, reset=False):
        """Update the value of the 'page' parameter

        Override this method in derived classes if necessary.
        It is usually enough to just adjust the 'page' attribute.
        """
        if reset:
            self.params[self.page] = 1
        else:
            self.params[self.page] += 1

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        # Override this method in derived classes
        return {
            "category": self.category,
        }

    def get_file_metadata(self, data):
        """Collect metadata for a downloadable file"""
        data["category"] = self.category
        return text.nameext_from_url(self.get_file_url(data), data)

    def get_file_url(self, data):
        """Extract the download-url from 'data'

        Raises KeyError if 'data' contains no 'file_url' entry.
        """
        url = data["file_url"]
        if url.startswith("/"):
            # resolve site-relative paths against the api url
            url = urllib.parse.urljoin(self.api_url, url)
        return url
class JSONBooruExtractor(BooruExtractor):
    """Extractor for boorus that expose a JSON listing api"""

    def items_impl(self):
        """Yield one metadata-dict per image across all result pages"""
        self.update_page(reset=True)
        while True:
            response = self.request(self.api_url, verify=True,
                                    params=self.params, headers=self.headers)
            batch = json.loads(response.text)
            yield from batch
            # a page shorter than 'limit' means there are no further results
            if len(batch) < self.params["limit"]:
                return
            self.update_page()
class XMLBooruExtractor(BooruExtractor):
    """Extractor for boorus that expose an XML listing api"""

    def items_impl(self):
        """Yield one attribute-dict per image across all result pages"""
        self.update_page(reset=True)
        while True:
            response = self.request(self.api_url, verify=True,
                                    params=self.params)
            root = ET.fromstring(response.text)
            yield from (child.attrib for child in root)
            # a page shorter than 'limit' means there are no further results
            if len(root) < self.params["limit"]:
                return
            self.update_page()
class BooruTagExtractor(BooruExtractor):
    """Extract images based on search-tags"""

    directory_fmt = ["{category}", "{tags}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        tags = text.unquote(match.group(1))
        self.tags = tags
        # search the api for exactly these tags
        self.params["tags"] = tags

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {"category": self.category, "tags": self.tags}
class BooruPoolExtractor(BooruExtractor):
    """Extract image-pools"""

    directory_fmt = ["{category}", "pool", "{pool}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        pool_id = match.group(1)
        self.pool = pool_id
        # pools are queried through the regular search api via a pseudo-tag
        self.params["tags"] = "pool:" + pool_id

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {"category": self.category, "pool": self.pool}
class BooruPostExtractor(BooruExtractor):
    """Extract single images"""

    directory_fmt = ["{category}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        post_id = match.group(1)
        self.post = post_id
        # single posts are queried through the search api via an id pseudo-tag
        self.params["tags"] = "id:" + post_id