1
0
mirror of https://github.com/mikf/gallery-dl.git synced 2024-11-22 02:32:33 +01:00

[mangashare] add extractor

This commit is contained in:
Mike Fährmann 2015-11-15 03:40:17 +01:00
parent 3f2fbd874d
commit 41f5971e73
No known key found for this signature in database
GPG Key ID: 5680CA389D365A88
2 changed files with 74 additions and 0 deletions

View File

@ -33,6 +33,7 @@ modules = [
"konachan",
"mangapanda",
"mangareader",
"mangashare",
"mangastream",
"nhentai",
"nijie",

View File

@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract manga pages from http://www.mangashare.com/"""
from .common import AsynchronousExtractor, Message
from .. import text
import os
# Module-level extractor metadata — presumably consumed by the framework's
# module loader / extractor registry (TODO: confirm against gallery-dl core).
info = {
    "category": "mangashare",
    # class name of the extractor implemented below
    "extractor": "MangaShareExtractor",
    # directory layout for downloads; chapter number zero-padded to 3 digits
    "directory": ["{category}", "{manga}", "c{chapter:>03} - {title}"],
    # filename template; page number likewise zero-padded
    "filename": "{manga}_c{chapter:>03}_{page:>03}.{extension}",
    # URL patterns this extractor claims; group(1) is the chapter path
    "pattern": [
        r"(?:https?://)?read\.mangashare\.com/([^/]+/chapter-\d+)",
    ],
}
class MangaShareExtractor(AsynchronousExtractor):
    """Extractor for manga chapters on read.mangashare.com"""

    url_fmt = "http://read.mangashare.com/{}/page{:>03}.html"

    def __init__(self, match):
        AsynchronousExtractor.__init__(self)
        # chapter path captured by the URL pattern, e.g. "some-manga/chapter-12"
        self.part = match.group(1)

    def items(self):
        """Yield version, directory and url messages for one chapter"""
        page = self.request(self.url_fmt.format(self.part, 1)).text
        mdata = self.get_job_metadata(page)
        yield Message.Version, 1
        yield Message.Directory, mdata
        # zip() pulls from the range first, so the (endless) image-url
        # generator is stopped after exactly 'count' urls and never
        # triggers a request for a page past the chapter end
        numbers = range(1, int(mdata["count"]) + 1)
        for pnum, url in zip(numbers, self.get_image_urls(page)):
            name, ext = os.path.splitext(text.filename_from_url(url))
            mdata["name"] = name
            mdata["extension"] = ext[1:]
            mdata["page"] = pnum
            yield Message.Url, url, mdata.copy()

    @staticmethod
    def get_job_metadata(page):
        """Collect metadata for extractor-job"""
        defaults = {
            "category": info["category"],
            "lang": "en",
            "language": "English",
        }
        mdata, pos = text.extract_all(page, (
            ('manga'  , 'title="', '"'),
            ('chapter', 'selected="selected">', ' - '),
            ('title'  , '', '<'),
            (None     , 'Page 1', ''),
            (None     , '</select>', ''),
        ), values=defaults)
        # step 35 characters back so the last '>Page N<' option of the
        # page-select dropdown is found again; N is the total page count
        mdata["count"] = text.extract(page, '>Page ', '<', pos-35)[0]
        return mdata

    def get_image_urls(self, page):
        """Yield all image-urls for this chapter (endless generator;
        the caller is responsible for stopping at the page count)"""
        pnum = 1
        while True:
            # each reader page embeds two consecutive chapter images
            pos = text.extract(page, '<div id="page">', '')[1]
            for _ in range(2):
                url, pos = text.extract(page, '<img src="', '"', pos)
                yield url
            pnum += 2
            page = self.request(self.url_fmt.format(self.part, pnum)).text