From 41f5971e73b2a256c7385fd0fc3ef7d738ff3c95 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mike=20F=C3=A4hrmann?=
Date: Sun, 15 Nov 2015 03:40:17 +0100
Subject: [PATCH] [mangashare] add extractor

---
 gallery_dl/extractor/__init__.py   |  1 +
 gallery_dl/extractor/mangashare.py | 73 ++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)
 create mode 100644 gallery_dl/extractor/mangashare.py

diff --git a/gallery_dl/extractor/__init__.py b/gallery_dl/extractor/__init__.py
index ab346bb5..066cd18a 100644
--- a/gallery_dl/extractor/__init__.py
+++ b/gallery_dl/extractor/__init__.py
@@ -33,6 +33,7 @@ modules = [
     "konachan",
     "mangapanda",
     "mangareader",
+    "mangashare",
     "mangastream",
     "nhentai",
     "nijie",
diff --git a/gallery_dl/extractor/mangashare.py b/gallery_dl/extractor/mangashare.py
new file mode 100644
index 00000000..dc5fae04
--- /dev/null
+++ b/gallery_dl/extractor/mangashare.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Mike Fährmann
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+"""Extract manga pages from http://www.mangashare.com/"""
+
+from .common import AsynchronousExtractor, Message
+from .. import text
+import os
+
+info = {
+    "category": "mangashare",
+    "extractor": "MangaShareExtractor",
+    "directory": ["{category}", "{manga}", "c{chapter:>03} - {title}"],
+    "filename": "{manga}_c{chapter:>03}_{page:>03}.{extension}",
+    "pattern": [
+        r"(?:https?://)?read\.mangashare\.com/([^/]+/chapter-\d+)",
+    ],
+}
+
+class MangaShareExtractor(AsynchronousExtractor):
+
+    url_fmt = "http://read.mangashare.com/{}/page{:>03}.html"
+
+    def __init__(self, match):
+        AsynchronousExtractor.__init__(self)
+        self.part = match.group(1)
+
+    def items(self):
+        page = self.request(self.url_fmt.format(self.part, 1)).text
+        data = self.get_job_metadata(page)
+        yield Message.Version, 1
+        yield Message.Directory, data
+        for i, url in zip(range(int(data["count"])), (self.get_image_urls(page))):
+            name, ext = os.path.splitext(text.filename_from_url(url))
+            data["name"] = name
+            data["extension"] = ext[1:]
+            data["page"] = i+1
+            yield Message.Url, url, data.copy()
+
+    @staticmethod
+    def get_job_metadata(page):
+        """Collect metadata for extractor-job"""
+        data = {
+            "category": info["category"],
+            "lang": "en",
+            "language": "English",
+        }
+        data, pos = text.extract_all(page, (
+            ('manga'  , 'title="', '"'),
+            ('chapter', 'selected="selected">', ' - '),
+            ('title'  , '', '<'),
+            (None     , 'Page 1', ''),
+            (None     , '', ''),
+        ), values=data)
+        data["count"] = text.extract(page, '>Page ', '<', pos-35)[0]
+        return data
+
+    def get_image_urls(self, page):
+        """Yield all image-urls for this chapter"""
+        pnum = 1
+        while True:
+            _  , pos = text.extract(page, '', '')
+            url, pos = text.extract(page, '
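
The patch body above is cut off inside get_image_urls: the marker strings of the two text.extract() calls were stripped (the first now reads as an empty string, the second is truncated), and the last few of the file's 73 lines are missing. A minimal sketch of how such a page loop is typically completed in this codebase's style, reusing url_fmt and self.part from above; the '<div id="page">' and ' src="' markers and the final three statements are assumptions, not recovered from the original commit:

    def get_image_urls(self, page):
        """Yield all image-urls for this chapter"""
        pnum = 1
        while True:
            # assumed markers: skip ahead to the page container, then take the image source
            _  , pos = text.extract(page, '<div id="page">', '')
            url, pos = text.extract(page, ' src="', '"', pos)
            yield url
            # fetch the next page of the chapter; items() stops consuming
            # this generator after data["count"] images
            pnum += 1
            page = self.request(self.url_fmt.format(self.part, pnum)).text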