# -*- coding: utf-8 -*-

# Copyright 2016-2017 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract hentai-manga from https://hentai2read.com/"""

from .common import MangaExtractor
from .. import text
from . import hentaicdn
import re
import json


class Hentai2readMangaExtractor(MangaExtractor):
    """Extractor for hmanga from hentai2read.com"""
    category = "hentai2read"
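    # matches manga overview pages, e.g. "http://hentai2read.com/amazon_elixir/"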
    pattern = [r"(?:https?://)?(?:www\.)?(hentai2read\.com/[^/]+/?)$"]
    test = [
        ("http://hentai2read.com/amazon_elixir/", {
            "url": "d1f87b71d3c97b49a478cdfb6ae96b2d9520ab78",
        }),
        ("http://hentai2read.com/oshikage_riot/", {
            "url": "672f34cce7bf5a855c6c38e8bc9c5117a4b3061c",
        })
    ]

    def chapters(self, page):
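        # the chapter list is assumed to be markup of the form
        #   <ul class="nav-chapters remove-margin-b">
        #     <li>\n<a href="<chapter-url>"> ...
        # only the href values are collected below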
        page = text.extract(
            page, '<ul class="nav-chapters remove-margin-b">', '</ul>\n</div>'
        )[0]
        return list(text.extract_iter(page, '<li>\n<a href="', '"'))


class Hentai2readChapterExtractor(hentaicdn.HentaicdnChapterExtractor):
|
2016-09-12 10:20:57 +02:00
|
|
|
"""Extractor for a single manga chapter from hentai2read.com"""
|
2016-02-19 15:24:49 +01:00
|
|
|
category = "hentai2read"
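    # matches single chapter pages, e.g. "http://hentai2read.com/amazon_elixir/1/"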
    pattern = [r"(?:https?://)?(?:www\.)?hentai2read\.com/([^/]+)/(\d+)"]
    test = [("http://hentai2read.com/amazon_elixir/1/", {
        "url": "fb5fc4d7cc194116960eaa648c7e045a6e6f0c11",
        "keyword": "c05d0d0bbe188926b15a43df1f8f65b8ac11c3fd",
    })]

    def __init__(self, match):
        hentaicdn.HentaicdnChapterExtractor.__init__(self)
        url_title, self.chapter = match.groups()
        self.url = "http://hentai2read.com/{}/{}/".format(
            url_title, self.chapter
        )

    def get_job_metadata(self, page, images):
        title = text.extract(page, "<title>", "</title>")[0]
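        # page titles are assumed to follow the form
        # "Reading <series> dj - <title> Hentai - <chapter>: ...",
        # where the "<series> dj - " part is missing for standalone works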
        match = re.match(r"Reading (?:(.+) dj - )?(.+) Hentai - \d+: ", title)
        return {
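            # the gallery id is taken from the third-to-last path component
            # of the first image URL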
            "gallery-id": images[0].split("/")[-3],
            "chapter": self.chapter,
            "count": len(images),
            "series": match.group(1) or "",
            "title": match.group(2),
            "lang": "en",
            "language": "English",
        }

    @staticmethod
    def get_image_urls(page):
        """Extract and return a list of all image-urls"""
        images = text.extract(page, "'images' : ", ",\n")[0]
        return json.loads(images)