# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://chan.sankakucomplex.com/"""
2015-11-10 00:55:01 +01:00
from .common import AsynchronousExtractor, Message
from .. import text

class SankakuExtractor(AsynchronousExtractor):

    category = "sankaku"
    directory_fmt = ["{category}", "{tags}"]
    filename_fmt = "{category}_{id}_{md5}.{extension}"
    pattern = [r"(?:https?://)?chan\.sankakucomplex\.com/\?tags=([^&]+)"]
    url = "https://chan.sankakucomplex.com/"

    def __init__(self, match):
        AsynchronousExtractor.__init__(self)
        self.tags = text.unquote(match.group(1))
        self.session.headers["User-Agent"] = (
            "Mozilla/5.0 Gecko/20100101 Firefox/40.0"
        )

    def items(self):
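        """Yield a stream of Message-tuples for all matching images"""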
        data = self.get_job_metadata()
        yield Message.Version, 1
        yield Message.Headers, self.session.headers
        yield Message.Directory, data
        for image in self.get_images():
            image.update(data)
            yield Message.Url, image["file-url"], image

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        return {
            "category": self.category,
            "tags": self.tags,
        }

    def get_images(self):
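        """Yield metadata dictionaries for all images in the search result"""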
        params = {
            "tags": self.tags,
            "page": 1,
        }
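        # request result pages one by one; a page with fewer than 20 posts
        # is treated as the last page of the search result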
        while True:
            count = 0
            page = self.request(self.url, params=params).text
            pos = text.extract(page, '<div id=more-popular-posts-link>', '')[1]
            while True:
                image_id, pos = text.extract(page,
                    '<span class="thumb blacklisted" id=p', '>', pos)
                if not image_id:
                    break
                image = self.get_image_metadata(image_id)
                count += 1
                yield image
            if count < 20:
                return
            params["page"] += 1

    def get_image_metadata(self, image_id):
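        """Collect metadata for a single image by parsing its post page"""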
url = "https://chan.sankakucomplex.com/post/show/" + image_id
page = self.request(url).text
image_url, pos = text.extract(page, '<li>Original: <a href="', '"')
width , pos = text.extract(page, '>', 'x', pos)
height , pos = text.extract(page, '', ' ', pos)
2015-11-16 17:32:26 +01:00
data = text.nameext_from_url(image_url, {
2015-11-10 00:55:01 +01:00
"id": image_id,
"file-url": "https:" + image_url,
"width": width,
"height": height,
2015-11-16 17:32:26 +01:00
})
data["md5"] = data["name"]
return data