2016-02-19 15:24:49 +01:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2019-02-08 13:45:40 +01:00
|
|
|
# Copyright 2016-2019 Mike Fährmann
|
2016-02-19 15:24:49 +01:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License version 2 as
|
|
|
|
# published by the Free Software Foundation.
|
|
|
|
|
2017-05-20 11:27:43 +02:00
|
|
|
"""Extract hentai-manga from https://hentai2read.com/"""
|
2016-02-19 15:24:49 +01:00
|
|
|
|
2018-02-07 11:22:47 +01:00
|
|
|
from .common import ChapterExtractor, MangaExtractor
|
2018-04-20 14:53:21 +02:00
|
|
|
from .. import text
|
2017-02-26 02:25:36 +01:00
|
|
|
import json
|
2019-02-11 18:38:47 +01:00
|
|
|
import re
|
2016-02-19 15:24:49 +01:00
|
|
|
|
2017-02-01 00:53:19 +01:00
|
|
|
|
2019-02-11 18:38:47 +01:00
|
|
|
class Hentai2readBase():
    """Base class for hentai2read extractors"""
    # category name shared by all hentai2read extractors
    category = "hentai2read"
    # site root; relative paths from 'pattern' matches are joined onto this
    root = "https://hentai2read.com"
|
2016-02-20 06:49:35 +01:00
|
|
|
|
2017-09-20 13:28:57 +02:00
|
|
|
|
2019-02-11 18:38:47 +01:00
|
|
|
class Hentai2readChapterExtractor(Hentai2readBase, ChapterExtractor):
    """Extractor for a single manga chapter from hentai2read.com"""
    archive_fmt = "{chapter_id}_{page}"
    pattern = r"(?:https?://)?(?:www\.)?hentai2read\.com(/[^/?&#]+/(\d+))"
    test = ("https://hentai2read.com/amazon_elixir/1/", {
        "url": "964b942cf492b3a129d2fe2608abfc475bc99e71",
        "keyword": "9845105898d28c6a540cffdea60a1a20fab52431",
    })

    def __init__(self, match):
        # chapter number is the second group of 'pattern'
        self.chapter = match.group(2)
        ChapterExtractor.__init__(self, match)

    def metadata(self, page):
        """Collect chapter metadata from the reader page's HTML"""
        html_title, cursor = text.extract(page, "<title>", "</title>")
        mid, cursor = text.extract(page, 'data-mid="', '"', cursor)
        cid, cursor = text.extract(page, 'data-cid="', '"', cursor)
        # pick manga name, type, author, and chapter title out of the
        # page <title>; the lone '.' matches the separator glyph
        parsed = re.match(
            r"Reading (.+) \(([^)]+)\) Hentai(?: by (.+))? - "
            r"(\d+): (.+) . Page 1 ", html_title)
        return {
            "manga": parsed.group(1),
            "manga_id": text.parse_int(mid),
            "chapter": text.parse_int(self.chapter),
            "chapter_id": text.parse_int(cid),
            "type": parsed.group(2),
            "author": parsed.group(3),
            "title": parsed.group(5),
            "lang": "en",
            "language": "English",
        }

    @staticmethod
    def images(page):
        """Return a list of (image-url, None) tuples for this chapter"""
        # image paths are embedded as a JSON array in an inline script
        raw = text.extract(page, "'images' : ", ",\n")[0]
        urls = []
        for path in json.loads(raw):
            urls.append(("https://hentaicdn.com/hentai" + path, None))
        return urls
|
2019-02-11 18:38:47 +01:00
|
|
|
|
|
|
|
|
|
|
|
class Hentai2readMangaExtractor(Hentai2readBase, MangaExtractor):
    """Extractor for hmanga from hentai2read.com"""
    pattern = r"(?:https?://)?(?:www\.)?hentai2read\.com(/[^/?&#]+)/?$"
    test = (
        ("https://hentai2read.com/amazon_elixir/", {
            "url": "273073752d418ec887d7f7211e42b832e8c403ba",
            "keyword": "13c1ce7e15cbb941f01c843b0e89adc993d939ac",
        }),
        ("https://hentai2read.com/oshikage_riot/", {
            "url": "6595f920a3088a15c2819c502862d45f8eb6bea6",
            "keyword": "675c7b7a4fa52cf569c283553bd16b4200a5cd36",
        }),
    )

    def chapters(self, page):
        """Return a list of (url, metadata) tuples for all chapters"""
        # manga-wide fields, extracted once before the chapter loop
        manga, cursor = text.extract(
            page, '<span itemprop="name">', '</span>')
        mtype, cursor = text.extract(
            page, '<small class="text-danger">[', ']</small>', cursor)
        mid = text.parse_int(text.extract(
            page, 'data-mid="', '"', cursor)[0])

        chapters = []
        while True:
            cid, cursor = text.extract(page, ' data-cid="', '"', cursor)
            if not cid:
                # no further chapter entries on the page
                return chapters
            # skip the first href (download link); the second is the reader
            _, cursor = text.extract(page, ' href="', '"', cursor)
            url, cursor = text.extract(page, ' href="', '"', cursor)
            label, cursor = text.extract(page, '>', '<', cursor)

            # link text looks like "<number> - <title>"
            chapter, _, title = text.unescape(label).strip().partition(" - ")
            chapters.append((url, {
                "manga_id": mid, "manga": manga, "type": mtype,
                "chapter_id": text.parse_int(cid),
                "chapter": text.parse_int(chapter),
                "title": title, "lang": "en", "language": "English",
            }))
|