1
0
mirror of https://github.com/mikf/gallery-dl.git synced 2024-11-23 03:02:50 +01:00
gallery-dl/gallery_dl/extractor/redhawkscans.py

64 lines
2.3 KiB
Python
Raw Normal View History

2015-06-25 16:55:28 +02:00
# -*- coding: utf-8 -*-
# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract manga pages from http://manga.redhawkscans.com/"""
from .common import SequentialExtractor
from .common import Message
from .common import unescape
import os.path
import json
import re
# Extractor registration record: category name, extractor class name,
# output path/file format strings, and the URL pattern(s) this module claims.
info = {
    "category": "redhawkscans",
    "extractor": "RedHawkScansExtractor",
    # directory layout: <category>/<manga>/c<chapter><minor> - <title>
    "directory": ["{category}", "{manga}", "c{chapter:>03}{chapter-minor} - {title}"],
    # per-page filename: <manga>_c<chapter><minor>_<page>.<ext>
    "filename": "{manga}_c{chapter:>03}{chapter-minor}_{page:>03}.{extension}",
    # reader URLs; group 1 is the chapter path passed to the extractor
    "pattern": [
        r"(?:https?://)?manga\.redhawkscans\.com/reader/read/(.+)(?:/page)?.*",
    ],
}
class RedHawkScansExtractor(SequentialExtractor):
    """Extractor for manga chapters hosted on manga.redhawkscans.com."""

    url_base = "https://manga.redhawkscans.com/reader/read/"

    def __init__(self, match, config):
        SequentialExtractor.__init__(self, config)
        # chapter path captured by the URL pattern (group 1)
        self.part = match.group(1)

    def items(self):
        """Yield extractor messages: version, directory, then one URL per page."""
        yield Message.Version, 1
        metadata, page_list = self.get_job_metadata()
        yield Message.Directory, metadata
        num = 0
        for page in page_list:
            num += 1
            base, extension = os.path.splitext(page["filename"])
            page.update(metadata)
            page["page"] = num
            page["name"] = base
            page["extension"] = extension[1:]
            # rewrite the scheme prefix to force https
            yield Message.Url, "https" + page["url"][4:], page

    def get_job_metadata(self):
        """Collect metadata for extractor-job"""
        html = self.request(self.url_base + self.part).content.decode("utf-8")
        # walk the reader page header: skip to the title bar, then pull the
        # manga name and the chapter link text
        _, pos = self.extract(html, '<h1 class="tbtitle dnone">', '')
        manga, pos = self.extract(html, 'title="', '"', pos)
        chapter, pos = self.extract(html, '">', '</a>', pos)
        # the page list is embedded as a JS variable holding a JSON array
        raw_pages, pos = self.extract(html, 'var pages = ', ';\r\n', pos)
        # split "Chapter <num><minor>: <title>" into its parts; a label that
        # doesn't follow that form falls through to the whole-string group
        parts = re.match(r"(Chapter (\d+)([^:+]*)(?:: (.*))?|[^:]+)", chapter)
        metadata = {
            "category": info["category"],
            "manga": unescape(manga),
            "chapter": parts.group(2) or parts.group(1),
            "chapter-minor": parts.group(3) or "",
            "language": "English",
            "title": unescape(parts.group(4) or ""),
        }
        return metadata, json.loads(raw_pages)