Mirror of https://github.com/mikf/gallery-dl.git

[pixiv] update to new extractor interface

Mike Fährmann 2015-04-10 15:29:09 +02:00
parent 7c8d787077
commit 1cd25b5369
2 changed files with 115 additions and 84 deletions
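
The "new extractor interface" named in the commit message is the message-based protocol visible in the pixiv diff below: items() yields (message-type, payload, ...) tuples such as Message.Version, Message.Headers, Message.Cookies, Message.Directory and Message.Url instead of the old (url, filename) pairs produced by images(). The following is a minimal, self-contained sketch of a consumer of that stream; the Message stand-in and the handler logic are illustrative assumptions, not gallery-dl's actual job code.

    # Illustrative stand-in for the Message constants imported from .common
    class Message:
        Version, Headers, Cookies, Directory, Url = range(5)

    def consume(items):
        """Hypothetical downloader loop draining an extractor's items() stream."""
        for msg in items:
            kind = msg[0]
            if kind == Message.Version:
                assert msg[1] == 1                    # protocol version comes first
            elif kind == Message.Headers:
                print("set headers:", msg[1])         # e.g. {"Referer": "http://www.pixiv.net/"}
            elif kind == Message.Cookies:
                print("set cookies:", msg[1])         # the configured pixiv-cookies
            elif kind == Message.Directory:
                print("directory metadata:", msg[1])  # used to build the target directory
            elif kind == Message.Url:
                print("download", msg[1], "metadata:", msg[2])

    # Fake stream shaped like what PixivExtractor.items() yields
    consume([
        (Message.Version, 1),
        (Message.Headers, {"Referer": "http://www.pixiv.net/"}),
        (Message.Cookies, {"PHPSESSID": "..."}),
        (Message.Directory, {"category": "pixiv", "artist-id": "12345"}),
        (Message.Url, "http://i1.pixiv.net/img-original/img/.../123_p0.jpg",
         {"category": "pixiv", "illust-id": "123", "extension": "jpg"}),
    ])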


@@ -69,11 +69,11 @@ class SequentialExtractor(Extractor):
         Extractor.__init__(self)
 
-class AsyncExtractor(Extractor):
+class AsynchronousExtractor(Extractor):
 
     def __init__(self, config):
         Extractor.__init__(self)
-        queue_size = int(config.get("queue-size", 5))
+        queue_size = int(config.get("general", "queue-size", fallback=5))
         self.__queue = queue.Queue(maxsize=queue_size)
         self.__thread = threading.Thread(target=self.async_items)
         # self.__thread = threading.Thread(target=self.async_images, daemon=True)
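
One detail in the hunk above: queue-size is now read as config.get("general", "queue-size", fallback=5), which implies the config object passed to extractors behaves like a configparser.ConfigParser (section, option, keyword-only fallback) rather than a plain dict. A small standard-library sketch of that lookup, with made-up .ini content:

    import configparser

    config = configparser.ConfigParser()
    config.read_string("""
    [general]
    queue-size = 8
    """)

    # dict style (old line):          config.get("queue-size", 5)
    # ConfigParser style (new line):  section, option, keyword-only fallback
    queue_size = int(config.get("general", "queue-size", fallback=5))
    print(queue_size)  # 8 here; 5 if the option were missing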


@@ -1,94 +1,116 @@
-from .common import AsyncExtractor
-from ..util import safe_request
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, 2015 Mike Fährmann
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+"""Extract images and ugoira from http://www.pixiv.net/"""
+
+from .common import AsynchronousExtractor
+from .common import Message
+from .common import safe_request
 import re
 import csv
 import requests
 
-class Extractor(AsyncExtractor):
+info = {
+    "category": "pixiv",
+    "extractor": "PixivExtractor",
+    "directory": ["{category}", "{artist-id}"],
+    "filename": "{category}_{artist-id}_{illust-id}{num}.{extension}",
+    "pattern": [
+        r"(?:https?://)?(?:www\.)?pixiv\.net/member(?:_illust)?\.php\?id=(\d+)",
+    ],
+}
+
+class PixivExtractor(AsynchronousExtractor):
 
     member_url = "http://www.pixiv.net/member_illust.php"
     illust_url = "http://www.pixiv.net/member_illust.php?mode=medium"
 
+    singl_v1_fmt = ("http://i{thumbnail-url[8]}.pixiv.net/img{directory:>02}"
+                    "/img/{artist-nick}/{illust-id}.{extension}")
+    manga_v1_fmt = ("http://i{thumbnail-url[8]}.pixiv.net/img{directory:>02}"
+                    "/img/{artist-nick}/{illust-id}{big}_p{index}.{extension}")
+    singl_v2_fmt = ("http://i{thumbnail-url[8]}.pixiv.net/img-original/img"
+                    "/{url-date}/{illust-id}_p0.{extension}")
+    manga_v2_fmt = ("http://i{thumbnail-url[8]}.pixiv.net/img-original/img"
+                    "/{url-date}/{illust-id}_p{index}.{extension}")
+
     def __init__(self, match, config):
-        AsyncExtractor.__init__(self, config)
-        self.member_id = match.group(1)
-        self.category = "pixiv"
-        self.directory = self.member_id
-        self.session.cookies.update(config["pixiv-cookies"])
-        self.session.headers.update({"Referer": "http://www.pixiv.net/"})
+        AsynchronousExtractor.__init__(self, config)
+        self.config = config
+        self.artist_id = match.group(1)
         self.api = PixivAPI(config["pixiv-cookies"]["PHPSESSID"])
 
-    def images(self):
-        sname_fmt = "pixiv_{1}_{0}.{2}"
-        mname_fmt = "pixiv_{1}_{0}_p{num:02}.{2}"
-        singl_v1_fmt = "http://i{6[8]}.pixiv.net/img{4:>02}/img/{24}/{0}.{2}"
-        manga_v1_fmt = "http://i{6[8]}.pixiv.net/img{4:>02}/img/{24}/{0}{big}_p{num}.{2}"
-        singl_v2_fmt = "http://i{6[8]}.pixiv.net/img-original/img/{date}/{0}_p0.{2}"
-        manga_v2_fmt = "http://i{6[8]}.pixiv.net/img-original/img/{date}/{0}_p{num}.{2}"
-        date = ""
-        big = ""
-        for img in self.image_ids():
-            data = self.api.request(img)
+    def items(self):
+        yield Message.Version, 1
+        yield Message.Headers, {"Referer": "http://www.pixiv.net/"}
+        yield Message.Cookies, self.config["pixiv-cookies"]
+        yield Message.Directory, self.get_job_metadata()
+        for illust_id in self.get_illust_ids():
+            data = self.api.request(illust_id)
             # debug
             # for i, value in enumerate(data):
                 # print("{:02}: {}".format(i, value))
             # return
             # debug end
-            if "うごイラ" in data[13]:
+            # if "うごイラ" in data["tags"]:
                 # ugoira / animations
-                try:
-                    url, framelist = self.parse_ugoira(img)
-                    data[2] = "zip"
-                    yield (url, sname_fmt.format(*data))
-                    data[2] = "txt"
-                    yield (framelist, sname_fmt.format(*data))
-                    continue
-                except:
-                    print("[Warning] failed to get ugoira url; trying fallback")
+                # url, framelist = self.parse_ugoira(img)
+                # data[2] = "zip"
+                # yield (url, sname_fmt.format(*data))
+                # data[2] = "txt"
+                # yield (framelist, sname_fmt.format(*data))
+                # continue
             # images
-            if img > 46270949:
-                date = data[6][45:64]
-                url_s_fmt = singl_v2_fmt
-                url_m_fmt = manga_v2_fmt
+            if illust_id > 46270949:
+                big = ""
+                url_s_fmt = self.singl_v2_fmt
+                url_m_fmt = self.manga_v2_fmt
             else:
-                big = "_big" if img > 11319935 else ""
-                url_s_fmt = singl_v1_fmt
-                url_m_fmt = manga_v1_fmt
-            if not data[19]:
-                yield (url_s_fmt.format(*data, date=date), sname_fmt.format(*data))
+                big = "_big" if illust_id > 11319935 else ""
+                url_s_fmt = self.singl_v1_fmt
+                url_m_fmt = self.manga_v1_fmt
+            if not data["count"]:
+                yield Message.Url, url_s_fmt.format(**data), data
             else:
-                for i in range(0, int(data[19])):
-                    yield (url_m_fmt.format(*data, num=i, date=date, big=big),
-                           mname_fmt.format(*data, num=i))
+                for i in range(0, int(data["count"])):
+                    data["num"] = "_p{:02}".format(i)
+                    yield (Message.Url,
+                           url_m_fmt.format(index=i, big=big, **data),
+                           data.copy())
 
-    def image_ids(self):
-        """generator -- yield all image ids"""
-        needle = '<a href="/member_illust.php?mode=medium&amp;illust_id='
-        params = {"id": self.member_id, "p": 1}
+    def get_illust_ids(self):
+        """Yield all illust-ids for a pixiv-member"""
+        needle = ('<li class="image-item "><a href="'
+                  '/member_illust.php?mode=medium&amp;illust_id=')
+        params = {"id": self.artist_id, "p": 1}
+        pos = 0
         while True:
             text = self.request(self.member_url, params=params).text
-            end = 0
             found = 0
             while True:
-                pos = text.find(needle, end)
-                if pos == -1:
+                illust_id, pos = self.extract(text, needle, '"', pos)
+                if illust_id is None:
                     break
-                pos += len(needle)
-                end = text.find('"', pos)
                 found += 1
-                yield int(text[pos:end])
+                yield int(illust_id)
             if found != 20:
                 return
             params["p"] += 1
 
     def parse_ugoira(self, illust_id):
+        """Parse ugoira data"""
         # get illust page
         text = self.request(
             self.illust_url,
@@ -108,9 +130,15 @@ class Extractor(AsyncExtractor):
             r'\1 \2\n',
             frames
         )
         return url, framelist
 
+    def get_job_metadata(self):
+        """Collect metadata for extractor-job"""
+        return {
+            "category": info["category"],
+            "artist-id": self.artist_id,
+        }
+
 
 class PixivAPI():
 
     api_url = "http://spapi.pixiv.net/iphone/illust.php"
@@ -120,31 +148,34 @@ class PixivAPI():
         self.session.params["PHPSESSID"] = session_id
 
     def request(self, illust_id):
-        while True:
+        data = next(csv.reader(
+            [self.api_call(illust_id)]
+        ))
+        return {
+            "category": info["category"],
+            "illust-id": data[0],
+            "artist-id": data[1],
+            "extension": data[2],
+            "title": data[3],
+            "directory": data[4],
+            "artist-name": data[5],
+            "thumbnail-url": data[6],
+            "url-date": data[6][45:64],
+            # "thumbnail-mobile-url": data[9],
+            "date": data[12],
+            "tags": data[13],
+            # "description": data[18],
+            "count": data[19],
+            "artist-nick": data[24],
+            # "artist-avatar-url": data[29],
+            "num": "",
+        }
+
+    def api_call(self, illust_id):
+        text = ""
+        while len(text) < 32:
             text = safe_request(
-                self.session,
-                self.api_url,
+                self.session, self.api_url,
                 params={"illust_id": illust_id}
             ).text
-            if len(text) > 31:
-                return next(csv.reader([text]))
-
-# class FileDict(dict):
-#
-#     def __init__(self, *args):
-#         super().__init__()
-#         self.re = re.compile(r"pixiv_\d+_(?P<id>\d+)(?P<extra>_p\d+)?\.[a-z]{3}")
-#         for arg in args:
-#             self.load_from(arg)
-#
-#     def load_from(self, directory):
-#         match = self.re.match
-#         for file in os.listdir(directory):
-#             m = match(file)
-#             if m is None:
-#                 continue
-#             val = True if m.group("extra") else False
-#             dict.__setitem__(self, m.group("id"), val)
-#
-#     def __getitem__(self, key):
-#         return dict.get(self, key)
+        return text