# -*- coding: utf-8 -*-

# Copyright 2018-2022 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://komikcast.me/"""
from .common import ChapterExtractor, MangaExtractor
from .. import text
import re
BASE_PATTERN = r"(?:https?://)?(?:www\.)?komikcast\.(?:me|com)"
class KomikcastBase():
"""Base class for komikcast extractors"""
category = "komikcast"
2022-07-12 23:07:58 +02:00
root = "https://komikcast.me"
@staticmethod
def parse_chapter_string(chapter_string, data=None):
"""Parse 'chapter_string' value and add its info to 'data'"""
if not data:
data = {}
match = re.match(
r"(?:(.*) Chapter )?0*(\d+)([^ ]*)(?: (?:- )?(.+))?",
text.unescape(chapter_string),
)
manga, chapter, data["chapter_minor"], title = match.groups()
if manga:
data["manga"] = manga.partition(" Chapter ")[0]
2021-03-28 21:12:41 +02:00
if title and not title.lower().startswith("bahasa indonesia"):
data["title"] = title.strip()
else:
data["title"] = ""
data["chapter"] = text.parse_int(chapter)
data["lang"] = "id"
data["language"] = "Indonesian"
return data
class KomikcastChapterExtractor(KomikcastBase, ChapterExtractor):
2022-07-12 23:07:58 +02:00
"""Extractor for manga-chapters from komikcast.me"""
pattern = BASE_PATTERN + r"(/chapter/[^/?#]+/)"
test = (
2022-07-12 23:07:58 +02:00
(("https://komikcast.me/chapter"
"/apotheosis-chapter-02-2-bahasa-indonesia/"), {
"url": "74eca5c9b27b896816497f9b2d847f2a1fcfc209",
"keyword": "f3938e1aff9ad1f302f52447e9781b21f6da26d4",
}),
2022-07-12 23:07:58 +02:00
(("https://komikcast.me/chapter"
"/soul-land-ii-chapter-300-1-bahasa-indonesia/"), {
"url": "243a5250e210b40d17217e83b7547cefea5638bd",
"keyword": "cb646cfed3d45105bd645ab38b2e9f7d8c436436",
2019-04-26 15:14:10 +02:00
}),
)
def metadata(self, page):
2021-04-15 17:04:53 +02:00
info = text.extract(page, "<title>", " Komikcast<")[0]
return self.parse_chapter_string(info)
@staticmethod
def images(page):
readerarea = text.extract(
2021-03-28 21:12:41 +02:00
page, '<div class="main-reading-area', '</div')[0]
return [
(text.unescape(url), None)
2019-04-26 15:14:10 +02:00
for url in re.findall(r"<img[^>]* src=[\"']([^\"']+)", readerarea)
]
class KomikcastMangaExtractor(KomikcastBase, MangaExtractor):
2022-07-12 23:07:58 +02:00
"""Extractor for manga from komikcast.me"""
chapterclass = KomikcastChapterExtractor
2022-07-12 23:07:58 +02:00
pattern = BASE_PATTERN + r"(/(?:komik/)?[^/?#]+)/?$"
test = (
2022-07-12 23:07:58 +02:00
("https://komikcast.me/komik/090-eko-to-issho/", {
"url": "08204f0a703ec5272121abcf0632ecacba1e588f",
2019-03-22 13:27:40 +01:00
"keyword": "837a7e96867344ff59d840771c04c20dc46c0ab1",
}),
2022-07-12 23:07:58 +02:00
("https://komikcast.me/tonari-no-kashiwagi-san/"),
)
def chapters(self, page):
results = []
2019-03-22 13:27:40 +01:00
data = self.metadata(page)
for item in text.extract_iter(
2021-03-28 21:12:41 +02:00
page, '<a class="chapter-link-item" href="', '</a'):
url, _, chapter_string = item.rpartition('">Chapter ')
self.parse_chapter_string(chapter_string, data)
results.append((url, data.copy()))
return results
@staticmethod
2019-03-22 13:27:40 +01:00
def metadata(page):
"""Return a dict with general metadata"""
2021-04-15 17:04:53 +02:00
manga , pos = text.extract(page, "<title>" , " Komikcast<")
2021-03-28 21:12:41 +02:00
genres, pos = text.extract(
page, 'class="komik_info-content-genre">', "</span>", pos)
2019-03-22 13:27:40 +01:00
author, pos = text.extract(page, ">Author:", "</span>", pos)
mtype , pos = text.extract(page, ">Type:" , "</span>", pos)
return {
2021-03-28 21:12:41 +02:00
"manga": text.unescape(manga),
"genres": text.split_html(genres),
2019-03-22 13:27:40 +01:00
"author": text.remove_html(author),
"type": text.remove_html(mtype),
}