# -*- coding: utf-8 -*-

# Copyright 2015-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Base classes for extractors for danbooru and co"""

from .common import Extractor, Message
from .. import text
import xml.etree.ElementTree as ET
import json
import urllib.parse


class BooruExtractor(Extractor):
    """Base class for all booru extractors"""
    info = {}
    headers = {}
    pagestart = 1
    pagekey = "page"
    api_url = ""
    category = ""

    def __init__(self):
        Extractor.__init__(self)
        self.params = {"limit": 50}
        self.setup()

    def items(self):
        yield Message.Version, 1
        yield Message.Directory, self.get_job_metadata()
        yield Message.Headers, self.headers
        for data in self.items_impl():
            try:
                url = self.get_file_url(data)
                data = self.get_file_metadata(data)
                yield Message.Url, url, data
            except KeyError:
                continue

    def skip(self, num):
        limit = self.params["limit"]
        pages = num // limit
        self.pagestart += pages
        return pages * limit
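
    # Illustrative note (not in the original module): with the default
    # limit of 50, skip(120) computes pages = 120 // 50 = 2, advances
    # 'pagestart' from 1 to 3, and returns 100 as the number of images
    # actually skipped.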

    def items_impl(self):
        pass

    def setup(self):
        pass

    def update_page(self, reset=False):
        """Update the value of the 'page' parameter"""
        # Override this method in derived classes if necessary.
        # It is usually enough to just adjust the 'page' attribute
        if reset is False:
            self.params[self.pagekey] += 1
        else:
            self.params[self.pagekey] = self.pagestart
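
    # Illustrative note (not in the original module): items_impl() in the
    # subclasses below first calls update_page(reset=True) to start at
    # 'pagestart', then update_page() after every full batch of results
    # to request the next page.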

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        # Override this method in derived classes
        return {}

    def get_file_metadata(self, data):
        """Collect metadata for a downloadable file"""
        return text.nameext_from_url(self.get_file_url(data), data)

    def get_file_url(self, data):
        """Extract download-url from 'data'"""
        url = data["file_url"]
        if url.startswith("/"):
            url = urllib.parse.urljoin(self.api_url, url)
        return url
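
    # Illustrative note (not in the original module): a relative value such
    # as "/data/image.jpg", combined with a hypothetical api_url of
    # "https://booru.example.org/post/index.json", resolves to
    # "https://booru.example.org/data/image.jpg" via urljoin.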


class JSONBooruExtractor(BooruExtractor):
    """Base class for JSON based API responses"""
    def items_impl(self):
        self.update_page(reset=True)
        while True:
            images = json.loads(
                self.request(self.api_url, params=self.params,
                             headers=self.headers).text
            )
            for data in images:
                yield data
            if len(images) < self.params["limit"]:
                return
            self.update_page()
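
    # Illustrative note (not in the original module): each item yielded here
    # is one post object from the site's JSON API; a typical record might
    # look roughly like
    #   {"id": 123, "md5": "d41d8cd9...", "file_url": "/data/image.jpg", ...}
    # so that get_file_url() and get_file_metadata() in the base class can
    # read "file_url" and derive filename/extension from it.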


class XMLBooruExtractor(BooruExtractor):
    """Base class for XML based API responses"""
    def items_impl(self):
        self.update_page(reset=True)
        while True:
            root = ET.fromstring(
                self.request(self.api_url, params=self.params).text
            )
            for item in root:
                yield item.attrib
            if len(root) < self.params["limit"]:
                return
            self.update_page()


class BooruTagExtractor(BooruExtractor):
    """Extractor for images based on search-tags"""
    directory_fmt = ["{category}", "{tags}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        self.tags = text.unquote(match.group(1).replace("+", " "))
        self.params["tags"] = self.tags

    def get_job_metadata(self):
        return {"tags": self.tags}


class BooruPoolExtractor(BooruExtractor):
    """Extractor for image-pools"""
    directory_fmt = ["{category}", "pool", "{pool}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        self.pool = match.group(1)
        self.params["tags"] = "pool:" + self.pool

    def get_job_metadata(self):
        return {"pool": self.pool}


class BooruPostExtractor(BooruExtractor):
    """Extractor for single images"""
    directory_fmt = ["{category}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"

    def __init__(self, match):
        BooruExtractor.__init__(self)
        self.post = match.group(1)
        self.params["tags"] = "id:" + self.post
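

# Minimal sketch (not part of the original module) of how a site-specific
# extractor is expected to combine these base classes; the category, URLs,
# and pattern below are hypothetical placeholders.
class ExampleTagExtractor(JSONBooruExtractor, BooruTagExtractor):
    """Hypothetical tag extractor for a JSON-based booru site"""
    category = "example"
    api_url = "https://booru.example.org/post/index.json"
    pattern = [r"(?:https?://)?booru\.example\.org/post\?tags=([^&#]+)"]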