# -*- coding: utf-8 -*-

# Copyright 2016-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract comic-issues and entire comics from https://readcomiconline.to/"""

from .common import ChapterExtractor, MangaExtractor
from .. import text, cloudflare
import re


class ReadcomiconlineBase():
    """Base class for readcomiconline extractors"""
    category = "readcomiconline"
    directory_fmt = ["{category}", "{comic}", "{issue:>03}"]
    filename_fmt = "{comic}_{issue:>03}_{page:>03}.{extension}"
    archive_fmt = "{issue_id}_{page}"
    root = "https://readcomiconline.to"
    request = cloudflare.request_func


class ReadcomiconlineComicExtractor(ReadcomiconlineBase, MangaExtractor):
    """Extractor for comics from readcomiconline.to"""
    subcategory = "comic"
    pattern = [r"(?i)(?:https?://)?(?:www\.)?readcomiconline\.to"
               r"(/Comic/[^/?&#]+/?)$"]
    test = [
        ("https://readcomiconline.to/Comic/W-i-t-c-h", {
            "url": "e231bc2a293edb465133c37a8e36a7e7d94cab14",
            "keyword": "3986248e4458fa44a201ec073c3684917f48ee0c",
        }),
        ("https://readcomiconline.to/Comic/Bazooka-Jules", {
            "url": "711674cb78ed10bd2557315f7a67552d01b33985",
            "keyword": "f5ba5246cd787bb750924d9690cb1549199bd516",
        }),
    ]

    def __init__(self, match):
        MangaExtractor.__init__(self, match, self.root + match.group(1))

    def chapters(self, page):
        results = []
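        # grab the comic's name from the title bar and restrict further
        # parsing to the issue listing table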
        comic, pos = text.extract(page, ' class="barTitle">', '<')
        page , pos = text.extract(page, ' class="listing">', '</table>', pos)
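
        # the bar title ends with the word "information"; strip it to get the
        # plain comic name, then build the needle preceding each issue label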
        comic = comic.rpartition("information")[0].strip()
        needle = ' title="Read {} '.format(comic)
        comic = text.unescape(comic)
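
        # each extracted listing entry roughly has the form
        #   <issue-url>" title="Read <comic> <issue> comic online
        # so partitioning on the needle separates the URL from the issue label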
        for item in text.extract_iter(page, ' href="', ' comic online '):
            url, _, issue = item.partition(needle)
            url = url.rpartition('"')[0]
            if issue.startswith('Issue #'):
                issue = issue[7:]
            results.append((self.root + url, {
                "comic": comic, "issue": issue,
                "issue_id": text.parse_int(url.rpartition("=")[2]),
                "lang": "en", "language": "English",
            }))
        return results


class ReadcomiconlineIssueExtractor(ReadcomiconlineBase, ChapterExtractor):
    """Extractor for comic-issues from readcomiconline.to"""
    subcategory = "issue"
    pattern = [r"(?i)(?:https?://)?(?:www\.)?readcomiconline\.to"
               r"(/Comic/[^/?&#]+/[^/?&#]+\?id=(\d+))"]
    test = [("https://readcomiconline.to/Comic/W-i-t-c-h/Issue-130?id=22289", {
        "url": "2bbab6ec4fbc05d269cca420a82a9b5acda28682",
        "keyword": "c6de1c9c8a307dc4be56783c4ac6f1338ffac6fc",
    })]

    def __init__(self, match):
        ChapterExtractor.__init__(self, self.root + match.group(1))
        self.issue_id = match.group(2)

    def get_metadata(self, page):
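        # comic name and issue label sit on consecutive CRLF-terminated lines
        # (the site serves its pages with \r\n line endings)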
        comic, pos = text.extract(page, " - Read\r\n ", "\r\n")
        iinfo, pos = text.extract(page, " ", "\r\n", pos)
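        # the issue label is usually "Issue #<number>" or "#<number>";
        # any other label is kept verbatim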
        match = re.match(r"(?:Issue )?#(\d+)|(.+)", iinfo)
        return {
            "comic": comic,
            "issue": match.group(1) or match.group(2),
            "issue_id": text.parse_int(self.issue_id),
            "lang": "en",
            "language": "English",
        }

    def get_images(self, page):
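        # assigning None drops the Referer header entirely; requests omits
        # headers whose value is None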
        self.session.headers["Referer"] = None
        return [
            (url, None)
            for url in text.extract_iter(
                page, 'lstImages.push("', '"'
            )
        ]