gallery-dl/gallery_dl/extractor/booru.py

# -*- coding: utf-8 -*-
# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Base classes for extractors for danbooru and co"""
from .common import SequentialExtractor, Message
from .. import text
import xml.etree.ElementTree as ET
import json
import os.path
import urllib.parse

class BooruExtractor(SequentialExtractor):

    api_url = ""

    def __init__(self, match, config, info):
        SequentialExtractor.__init__(self, config)
        self.info = info
        self.tags = text.unquote(match.group(1))
        self.page = "page"
        self.params = {"tags": self.tags}
        self.headers = {}

    def items(self):
        yield Message.Version, 1
        yield Message.Directory, self.get_job_metadata()
        yield Message.Headers, self.headers
        for data in self.items_impl():
            yield Message.Url, self.get_file_url(data), \
                  self.get_file_metadata(data)
    def items_impl(self):
        """Yield one metadata dict per post; implemented in subclasses"""
        pass
    def update_page(self, reset=False):
        """Update the value of the 'page' parameter"""
        # Override this method in derived classes if necessary.
        # It is usually enough to just adjust the 'page' attribute
        # (see the illustrative sketch after this class).
        if reset is False:
            self.params[self.page] += 1
        else:
            self.params[self.page] = 1
    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {
            "category": self.info["category"],
            "tags": self.tags.replace("/", "_"),
        }

    def get_file_metadata(self, data):
        """Collect metadata for a downloadable file"""
        data["category"] = self.info["category"]
        data["name"] = text.unquote(
            text.filename_from_url(self.get_file_url(data))
        )
        data["extension"] = os.path.splitext(data["name"])[1][1:]
        return data

    def get_file_url(self, data):
        """Extract download-url from 'data'"""
        url = data["file_url"]
        if url.startswith("/"):
            url = urllib.parse.urljoin(self.api_url, url)
        return url
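

# Illustrative sketch (not part of the original module): one way a derived
# class might override update_page(), as suggested by the comment in
# BooruExtractor.update_page(). The class name and the "pid" parameter are
# assumptions chosen only to show the mechanics.
class PidBooruExtractor(BooruExtractor):
    """Hypothetical extractor for an API paginated via a zero-based 'pid'"""

    def __init__(self, match, config, info):
        BooruExtractor.__init__(self, match, config, info)
        # use 'pid' instead of the default 'page' as the paging parameter
        self.page = "pid"

    def update_page(self, reset=False):
        # pages start at 0 for this hypothetical API
        if reset is False:
            self.params[self.page] += 1
        else:
            self.params[self.page] = 0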


class JSONBooruExtractor(BooruExtractor):

    def items_impl(self):
        self.update_page(reset=True)
        while True:
            images = json.loads(
                self.request(self.api_url, verify=True, params=self.params,
                             headers=self.headers).text
            )
            if len(images) == 0:
                return
            for data in images:
                yield data
            self.update_page()


class XMLBooruExtractor(BooruExtractor):

    def items_impl(self):
        self.update_page(reset=True)
        while True:
            root = ET.fromstring(
                self.request(self.api_url, verify=True,
                             params=self.params).text
            )
            if len(root) == 0:
                return
            for item in root:
                yield item.attrib
            self.update_page()
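

# Illustrative sketch (not part of the original module): a minimal concrete
# extractor built on JSONBooruExtractor. The host name, endpoint, and category
# are invented for the example; a real extractor would also be matched against
# tag-search URLs (with the quoted tag string as group 1) and registered with
# the rest of the extractor machinery.
class ExampleJSONBooruExtractor(JSONBooruExtractor):
    """Hypothetical extractor for https://booru.example.org"""
    api_url = "https://booru.example.org/post/index.json"
    info = {"category": "examplebooru"}

    def __init__(self, match, config):
        JSONBooruExtractor.__init__(self, match, config, self.info)

# A caller consumes the tuples produced by items(): Message.Version and
# Message.Directory first, then Message.Headers, then one Message.Url with
# its file metadata for every post returned by the JSON API.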