
[sankaku] re-enable extractor

This commit is contained in:
Mike Fährmann 2015-11-09 02:29:33 +01:00
parent 36b376b5ba
commit 60833abcc6
No known key found for this signature in database
GPG Key ID: 5680CA389D365A88
2 changed files with 73 additions and 26 deletions


@@ -37,6 +37,7 @@ modules = [
     "powermanga",
     "redhawkscans",
     "safebooru",
+    "sankaku",
     "yandere",
 ]
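
The hunk above only registers the module name. As a rough sketch of how such a registry is typically consumed (the import path and the find_extractor helper below are illustrative assumptions, not code from this commit), each listed module can be imported and its info["pattern"] entries matched against a URL:

    import importlib
    import re

    def find_extractor(url, modules):
        # Hypothetical dispatcher: try every registered extractor module and
        # return the first one whose URL pattern matches.
        for name in modules:
            module = importlib.import_module("gallery_dl.extractor." + name)
            for pattern in module.info["pattern"]:
                match = re.match(pattern, url)
                if match:
                    return module, match
        return None, None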


@@ -1,35 +1,81 @@
-from .common import AsyncExtractor
-from ..util import filename_from_url
+# -*- coding: utf-8 -*-
 
-class Extractor(AsyncExtractor):
+# Copyright 2014, 2015 Mike Fährmann
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+"""Extract images from https://chan.sankakucomplex.com/"""
+
+from .common import Extractor, Message
+from .. import text
+import os.path
+
+info = {
+    "category": "sankaku",
+    "extractor": "SankakuExtractor",
+    "directory": ["{category}", "{tags}"],
+    "filename": "{category}_{id}_{md5}.{extension}",
+    "pattern": [
+        r"(?:https?://)?chan\.sankakucomplex\.com/\?tags=([^&]+)",
+    ],
+}
+
+class SankakuExtractor(Extractor):
 
     url = "https://chan.sankakucomplex.com/"
 
-    def __init__(self, match, config):
-        AsyncExtractor.__init__(self, config)
-        self.tags = match.group(1)
-        self.category = "sankaku"
-        self.directory = self.tags.replace("/", "_")
-        self.enable_useragent()
+    def __init__(self, match):
+        Extractor.__init__(self)
+        self.tags = text.unquote(match.group(1))
+        self.session.headers["User-Agent"] = (
+            "Mozilla/5.0 Gecko/20100101 Firefox/40.0"
+        )
 
-    def images(self):
-        needle = ' src="//c.sankakucomplex.com/data/preview/'
-        params = {"tags": self.tags, "page":1}
+    def items(self):
+        yield Message.Version, 1
+        data = self.get_job_metadata()
+        yield Message.Directory, data
+        for image in self.get_images():
+            data.update(image)
+            yield Message.Url, image["file-url"], data
+
+    def get_job_metadata(self):
+        """Collect metadata for extractor-job"""
+        return {
+            "category": info["category"],
+            "tags": self.tags,
+        }
+
+    def get_images(self):
+        image = {}
+        params = {
+            "tags": self.tags,
+            "page": 1,
+        }
         while True:
-            text = self.request(self.url, params=params).text
-            print(text)
-            return
-            pos = 0
-            found = 0
+            pos = 0
+            count = 0
+            page = self.request(self.url, params=params).text
             while True:
-                try:
-                    url, pos = self.extract(text, needle, '"', pos)
-                    found += 1
-                    print("https://cs.sankakucomplex.com/data/" + url)
-                    yield ("https://cs.sankakucomplex.com/data/" + url,
-                           "%s_%s" % (self.category, filename_from_url(url)))
-                except:
+                image["id"], pos = text.extract(page,
+                    '<span class="thumb blacklisted" id=p', '>', pos)
+                if not image["id"]:
                     break
-            if found == 0:
-                break
+                url , pos = text.extract(page, ' src="//c.sankakucomplex.com/', '"', pos)
+                tags, pos = text.extract(page, ' title="', '"', pos)
+                self.get_image_metadata(image, url)
+                count += 1
+                yield image
+            if count < 20:
+                return
             params["page"] += 1
+
+    @staticmethod
+    def get_image_metadata(image, url):
+        image["file-url"] = "https://cs.sankakucomplex.com/data/" + url[13:]
+        filename = text.filename_from_url(url)
+        name, ext = os.path.splitext(filename)
+        image["name"] = image["md5"] = name
+        image["extension"] = ext[1:]