# -*- coding: utf-8 -*-

# Copyright 2020-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://hentaihand.com/"""

from .common import GalleryExtractor, Extractor, Message
from .. import text, util


class HentaihandGalleryExtractor(GalleryExtractor):
    """Extractor for image galleries on hentaihand.com"""
    category = "hentaihand"
    root = "https://hentaihand.com"
    pattern = r"(?:https?://)?(?:www\.)?hentaihand\.com/\w+/comic/([\w-]+)"
    example = "https://hentaihand.com/en/comic/TITLE"

    def __init__(self, match):
        self.slug = match.group(1)
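        # query the site's JSON API endpoint instead of the matched HTML page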
        url = "{}/api/comics/{}".format(self.root, self.slug)
        GalleryExtractor.__init__(self, match, url)

    def metadata(self, page):
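        # 'page' is the JSON response from the API URL set in __init__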
        info = util.json_loads(page)
        data = {
            "gallery_id" : text.parse_int(info["id"]),
            "title"      : info["title"],
            "title_alt"  : info["alternative_title"],
            "slug"       : self.slug,
            "type"       : info["category"]["name"],
            "language"   : info["language"]["name"],
            "lang"       : util.language_to_code(info["language"]["name"]),
            "tags"       : [t["slug"] for t in info["tags"]],
            "date"       : text.parse_datetime(
                info["uploaded_at"], "%Y-%m-%d"),
        }
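        # these fields are lists of objects; keep only their names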
        for key in ("artists", "authors", "groups", "characters",
                    "relationships", "parodies"):
            data[key] = [v["name"] for v in info[key]]
        return data

    def images(self, _):
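        # image URLs come from a separate .../images API endpoint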
        info = self.request(self.gallery_url + "/images").json()
        return [(img["source_url"], img) for img in info["images"]]


class HentaihandTagExtractor(Extractor):
    """Extractor for tag searches on hentaihand.com"""
    category = "hentaihand"
    subcategory = "tag"
    root = "https://hentaihand.com"
    pattern = (r"(?i)(?:https?://)?(?:www\.)?hentaihand\.com"
               r"/\w+/(parody|character|tag|artist|group|language"
               r"|category|relationship)/([^/?#]+)")
    example = "https://hentaihand.com/en/tag/TAG"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.type, self.key = match.groups()

    def items(self):
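        # pluralize the tag type to get its API collection name,
        # e.g. "parody" -> "parodies", "artist" -> "artists"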
        if self.type[-1] == "y":
            tpl = self.type[:-1] + "ies"
        else:
            tpl = self.type + "s"
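
        # resolve the tag name to its numeric ID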
        url = "{}/api/{}/{}".format(self.root, tpl, self.key)
        tid = self.request(url, notfound=self.type).json()["id"]

        url = self.root + "/api/comics"
        params = {
            "per_page": "18",
            tpl       : tid,
            "page"    : 1,
            "q"       : "",
            "sort"    : "uploaded_at",
            "order"   : "desc",
            "duration": "day",
        }
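
        # paginate through results until the API reports the last page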
        while True:
            info = self.request(url, params=params).json()

            for gallery in info["data"]:
                gurl = "{}/en/comic/{}".format(self.root, gallery["slug"])
                gallery["_extractor"] = HentaihandGalleryExtractor
                yield Message.Queue, gurl, gallery

            if params["page"] >= info["last_page"]:
                return
            params["page"] += 1