# -*- coding: utf-8 -*-

# Copyright 2015-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://nhentai.net/"""

from .common import Extractor, Message
from .. import text
import json


class NhentaiGalleryExtractor(Extractor):
    """Extractor for image galleries from nhentai.net"""
    category = "nhentai"
    subcategory = "gallery"
    directory_fmt = ["{category}", "{gallery_id} {title}"]
    filename_fmt = "{category}_{gallery_id}_{num:>03}.{extension}"
    pattern = [r"(?:https?://)?(?:www\.)?nhentai\.net/g/(\d+)"]
    test = [("http://nhentai.net/g/147850/", {
        "url": "5179dbf0f96af44005a0ff705a0ad64ac26547d0",
        "keyword": "82751294e75fc203b019ffd94d8c1f94a5b86494",
    })]

    def __init__(self, match):
        Extractor.__init__(self)
        self.gid = match.group(1)

    def items(self):
        ginfo = self.get_gallery_info()
        data = self.get_job_metadata(ginfo)
        urlfmt = "{}galleries/{}/{{}}.{{}}".format(
            ginfo["media_url"], data["media_id"])
        extdict = {"j": "jpg", "p": "png", "g": "gif"}
        yield Message.Version, 1
        yield Message.Directory, data
        for data["num"], image in enumerate(ginfo["images"]["pages"], 1):
            ext = extdict.get(image["t"], "jpg")
            data["width"] = image["w"]
            data["height"] = image["h"]
            data["extension"] = ext
            yield Message.Url, urlfmt.format(data["num"], ext), data

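    # Illustrative sketch only: the urlfmt built in items() expands to
    # "<media_url>galleries/<media_id>/<page>.<ext>". With a hypothetical
    # media_url of "https://i.nhentai.net/" and media_id "987654" (neither
    # taken from the site), the first jpg page would be requested as:
    #
    #   https://i.nhentai.net/galleries/987654/1.jpg
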
    def get_gallery_info(self):
        """Extract and return gallery-info"""
        page = self.request("https://nhentai.net/g/" + self.gid + "/1/").text
        media_url, pos = text.extract(
            page, "media_url: '", "'")
        json_data, pos = text.extract(
            page, "gallery: ", ",\n", pos)
        if json_data.startswith("b'"):
            json_data = json_data[2:-1].replace(r"\\u", r"\u")
        json_dict = json.loads(json_data)
        json_dict["media_url"] = media_url
        return json_dict

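    # Illustrative sketch only: the minimal shape of the gallery-info dict
    # that the rest of this extractor relies on. All values below are
    # hypothetical; only keys actually accessed in items() and
    # get_job_metadata() are listed.
    #
    #   {
    #       "media_id": "987654",
    #       "upload_date": 1446030507,
    #       "scanlator": "",
    #       "num_pages": 2,
    #       "title": {"english": "...", "japanese": "..."},
    #       "images": {"pages": [
    #           {"t": "j", "w": 1200, "h": 1700},
    #           {"t": "p", "w": 1200, "h": 1700},
    #       ]},
    #       "media_url": "https://i.nhentai.net/",  # added by get_gallery_info()
    #   }
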
    def get_job_metadata(self, ginfo):
        """Collect metadata for extractor-job"""
        title_en = ginfo["title"].get("english", "")
        title_ja = ginfo["title"].get("japanese", "")
        return {
            "gallery_id": self.gid,
            "upload_date": ginfo["upload_date"],
            "media_id": ginfo["media_id"],
            "scanlator": ginfo["scanlator"],
            "count": ginfo["num_pages"],
            "title": title_en or title_ja,
            "title_en": title_en,
            "title_ja": title_ja,
        }
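
# Illustrative sketch only (not part of the extractor): how the URL pattern
# above maps a gallery URL to the id stored as self.gid, using the URL from
# the test case.
#
#   import re
#   match = re.match(NhentaiGalleryExtractor.pattern[0],
#                    "http://nhentai.net/g/147850/")
#   match.group(1)  # -> "147850"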