# -*- coding: utf-8 -*-

# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Base classes for extractors for danbooru and co"""

from .common import Extractor, Message
from .. import text
import xml.etree.ElementTree as ET
import json
import urllib.parse


class BooruExtractor(Extractor):
    """Base class for extractors of danbooru-like image boards.

    Subclasses provide 'info' and 'api_url' and implement 'items_impl'
    to yield one metadata mapping per downloadable image.
    """
    # per-site information; must contain at least a "category" key
    info = {}
    # base URL of the site's listing API; also used to absolutize
    # site-relative file URLs
    api_url = ""

    def __init__(self):
        Extractor.__init__(self)
        # name of the query parameter used for pagination
        self.page = "page"
        # NOTE: these must be two DISTINCT dicts. The original
        # 'self.headers = self.params = {}' aliased both names to one
        # object, so update_page() writing params["page"] also sent a
        # bogus 'page' HTTP header (and vice versa).
        self.headers = {}
        self.params = {}

    def items(self):
        """Generate the message stream consumed by the extraction job."""
        yield Message.Version, 1
        yield Message.Directory, self.get_job_metadata()
        yield Message.Headers, self.headers
        for data in self.items_impl():
            try:
                yield Message.Url, self.get_file_url(data), self.get_file_metadata(data)
            except KeyError:
                # entry without a usable 'file_url' (e.g. removed post):
                # skip it instead of aborting the whole run
                continue

    def items_impl(self):
        """Yield a metadata dict per image; implemented by subclasses."""
        pass

    def update_page(self, reset=False):
        """Update the value of the 'page' parameter"""
        # Override this method in derived classes if necessary.
        # It is usually enough to just adjust the 'page' attribute
        if reset is False:
            self.params[self.page] += 1
        else:
            self.params[self.page] = 1

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        # Override this method in derived classes
        return {
            "category": self.info["category"],
        }

    def get_file_metadata(self, data):
        """Collect metadata for a downloadable file"""
        data["category"] = self.info["category"]
        return text.nameext_from_url(self.get_file_url(data), data)

    def get_file_url(self, data):
        """Extract download-url from 'data'"""
        url = data["file_url"]
        if url.startswith("/"):
            # site-relative path -> absolute URL based on the API host
            url = urllib.parse.urljoin(self.api_url, url)
        return url
|
|
|
|
|
|
|
|
|
|
|
|
class JSONBooruExtractor(BooruExtractor):
    """Extractor for image boards that expose a JSON listing API."""

    def items_impl(self):
        """Yield one metadata dict per image by paging through the API.

        Stops as soon as a page comes back empty.
        """
        self.update_page(reset=True)
        while True:
            images = json.loads(
                self.request(self.api_url, verify=True, params=self.params,
                             headers=self.headers).text
            )
            # empty page -> no more results
            if not images:
                return
            yield from images
            self.update_page()
|
|
|
|
|
|
|
|
|
|
|
|
class XMLBooruExtractor(BooruExtractor):
    """Extractor for image boards that expose an XML listing API."""

    def items_impl(self):
        """Yield one metadata dict per image by paging through the API.

        Stops as soon as a page comes back without child elements.
        """
        self.update_page(reset=True)
        while True:
            response = self.request(self.api_url, verify=True,
                                    params=self.params)
            root = ET.fromstring(response.text)
            # ElementTree elements: test emptiness via len(), not truthiness
            if len(root) == 0:
                return
            for post in root:
                yield post.attrib
            self.update_page()
|