From a9e714565122c4edf2ddb0abbe97b4a51bbb8455 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mike=20F=C3=A4hrmann?= <mike_faehrmann@web.de>
Date: Wed, 20 Sep 2017 16:25:25 +0200
Subject: [PATCH] [hbrowse] extract hmanga metadata & general maintenance

---
 gallery_dl/extractor/hbrowse.py   | 77 ++++++++++++++++++++-----------
 gallery_dl/extractor/mangazuki.py |  2 +-
 test/test_extractors.py           |  4 ++--
 3 files changed, 52 insertions(+), 31 deletions(-)

diff --git a/gallery_dl/extractor/hbrowse.py b/gallery_dl/extractor/hbrowse.py
index 6169750a..395deba7 100644
--- a/gallery_dl/extractor/hbrowse.py
+++ b/gallery_dl/extractor/hbrowse.py
@@ -13,66 +13,87 @@ from .. import text
 import json
 
 
-class HbrowseMangaExtractor(MangaExtractor):
-    """Extractor for manga from hbrowse.com"""
+class HbrowseExtractor(Extractor):
+    """Base class for hbrowse extractors"""
     category = "hbrowse"
+    root = "http://www.hbrowse.com"
+
+    @staticmethod
+    def _parse_page(page, data):
+        text.extract_all(page, (
+            ('manga' , '<td class="listLong">', '</td>'),
+            ('artist', '<td class="listLong">', '</td>'),
+            ('total' , '<td class="listLong">', ' '),
+            ('origin', '<td class="listLong">', '</td>'),
+        ), values=data)
+
+        data["manga"] = text.unescape(data["manga"])
+        data["total"] = int(data["total"])
+        data["artist"] = text.remove_html(data["artist"])
+        data["origin"] = text.remove_html(data["origin"])
+
+
+class HbrowseMangaExtractor(MangaExtractor, HbrowseExtractor):
+    """Extractor for manga from hbrowse.com"""
     pattern = [r"(?:https?://)?((?:www\.)?hbrowse\.com/\d+)/?$"]
     reverse = False
     test = [("http://www.hbrowse.com/10363", {
         "url": "4d9def5df21c23f8c3d36de2076c189c02ea43bd",
+        "keyword": "aa0c6ba9ba180f18861aa5d608ff7f1966e666f8",
     })]
 
     def chapters(self, page):
+        results = []
+        data = {"manga_id": int(self.url.rstrip("/").rpartition("/")[2])}
+        self._parse_page(page, data)
+
+        pos = 0
         needle = '<td class="listMiddle">\n<a class="listLink" href="'
-        return list(text.extract_iter(page, needle, '"'))
+        while True:
+            url, pos = text.extract(page, needle, '"', pos)
+            if not url:
+                return results
+            title, pos = text.extract(page, '>View ', '<', pos)
+            results.append((url, {
+                "chapter": int(url.rpartition("/")[2][1:]),
+                "title": title, **data
+            }))
 
 
-class HbrowseChapterExtractor(Extractor):
+class HbrowseChapterExtractor(HbrowseExtractor):
     """Extractor for manga-chapters from hbrowse.com"""
-    category = "hbrowse"
     subcategory = "chapter"
-    directory_fmt = ["{category}", "{gallery_id} {title}", "c{chapter:>05}"]
-    filename_fmt = ("{category}_{gallery_id}_{chapter:>05}_"
+    directory_fmt = ["{category}", "{manga_id} {manga}", "c{chapter:>05}"]
+    filename_fmt = ("{category}_{manga_id}_{chapter:>05}_"
                     "{num:>03}.{extension}")
-    pattern = [r"(?:https?://)?(?:www\.)?hbrowse\.com/(\d+)/(c\d+)"]
+    pattern = [r"(?:https?://)?(?:www\.)?hbrowse\.com/(\d+)/c(\d+)"]
     test = [("http://www.hbrowse.com/10363/c00000", {
         "url": "634f4800858913f097bc3b62a8fedaf74b5254bd",
-        "keyword": "f0f96cefda19e5aee1a19454f63ffe3a425602ab",
+        "keyword": "730bd33de2a0a0fb4e0b6dcdafedcaeee1060047",
         "content": "44578ebbe176c2c27434966aef22945787e2781e",
     })]
-    url_base = "http://www.hbrowse.com"
 
     def __init__(self, match):
-        Extractor.__init__(self)
+        HbrowseExtractor.__init__(self)
         self.gid, self.chapter = match.groups()
-        self.path = "/{}/{}/".format(self.gid, self.chapter)
+        self.path = "/{}/c{}/".format(self.gid, self.chapter)
 
     def items(self):
-        page = self.request(self.url_base + self.path).text
+        page = self.request(self.root + self.path).text
         data = self.get_job_metadata(page)
         yield Message.Version, 1
         yield Message.Directory, data
-        for num, url in enumerate(self.get_image_urls(page), 1):
-            data["num"] = num
+        for data["num"], url in enumerate(self.get_image_urls(page), 1):
             yield Message.Url, url, text.nameext_from_url(url, data)
 
     def get_job_metadata(self, page):
         """Collect metadata for extractor-job"""
-        data = {
-            'gallery_id': self.gid,
-            "chapter": int(self.chapter[1:]),
-        }
-        return text.extract_all(page, (
-            ('title'      , '<td class="listLong">', '</td>'),
-            (None         , '<td class="listLong">', ''),
-            ('artist'     , '>', '<'),
-            ('count_total', '<td class="listLong">', ' '),
-            (None         , '<td class="listLong">', ''),
-            ('origin'     , '>', '<'),
-        ), values=data)[0]
+        data = {"manga_id": int(self.gid), "chapter": int(self.chapter)}
+        self._parse_page(page, data)
+        return data
 
     def get_image_urls(self, page):
         """Yield all image-urls for a 'chapter'"""
-        base = self.url_base + "/data" + self.path
+        base = self.root + "/data" + self.path
         json_data = text.extract(page, ';list = ', ',"zzz"')[0] + "]"
         return [base + name for name in json.loads(json_data)]
diff --git a/gallery_dl/extractor/mangazuki.py b/gallery_dl/extractor/mangazuki.py
index a536bab2..8f75bb99 100644
--- a/gallery_dl/extractor/mangazuki.py
+++ b/gallery_dl/extractor/mangazuki.py
@@ -80,7 +80,7 @@ class MangazukiMangaExtractor(MangaExtractor):
             "url": "aab747414191b14e768f4a1eb148448d83ef2e14",
         }),
         ("https://raws.mangazuki.co/series/Rakujitsu-no-Pathos", {
-            "url": "57ac10ce4f4a93a313c80542bbc5bd6fd922b055",
+            "url": "0b85292b096909e8419632f35d3e2680d468c12c",
         }),
     ]
 
diff --git a/test/test_extractors.py b/test/test_extractors.py
index 2aa678ca..fb8501ab 100644
--- a/test/test_extractors.py
+++ b/test/test_extractors.py
@@ -66,9 +66,9 @@ skip = [
     "exhentai", "kissmanga", "mangafox", "dynastyscans", "nijie",
     "archivedmoe", "archiveofsins", "thebarchive",
     # temporary issues
-    "imgtrex",  # 504
-    "hentaifoundry",  # SSL cert expired
+    "luscious",  # "high load"
     "pawoo",
+    "seaotterscans",  # "Name or service not known"
 ]
 # enable selective testing for direct calls
 if __name__ == '__main__' and len(sys.argv) > 1:
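
The patch's two core parsing ideas can be exercised without a gallery-dl
checkout. The self-contained sketch below re-implements them with the
standard library only: sequential needle-based extraction of the four
'listLong' metadata cells (as in _parse_page), and recovering the image
list from the embedded JavaScript array (as in get_image_urls). The
extract() and remove_html() helpers merely mimic gallery_dl.text.extract()
and text.remove_html(), and the SAMPLE_* strings and the 'base' URL are
made-up stand-ins for real hbrowse.com markup.

# -*- coding: utf-8 -*-
# Illustrative sketch, not part of the patch: stdlib-only stand-ins for
# the gallery_dl.text helpers used by the new HbrowseExtractor code.
import html
import json
import re

# Hypothetical markup in the shape the extractor expects.
SAMPLE_INFO = (
    '<td class="listLong">Some Manga &amp; More</td>'
    '<td class="listLong"><a href="/artist/x">Some Artist</a></td>'
    '<td class="listLong">12 pages</td>'
    '<td class="listLong"><a href="/origin/jp">japan</a></td>'
)
SAMPLE_SCRIPT = ';list = ["001.jpg","002.jpg","003.jpg","zzz"]'


def extract(txt, begin, end, pos=0):
    """Return the substring between 'begin' and 'end' plus the new offset
    (mimics gallery_dl.text.extract)"""
    try:
        first = txt.index(begin, pos) + len(begin)
        last = txt.index(end, first)
        return txt[first:last], last + len(end)
    except ValueError:
        return None, pos


def remove_html(txt):
    """Strip HTML tags and collapse whitespace (mimics text.remove_html)"""
    return " ".join(re.sub("<[^>]+>", " ", txt).split())


def parse_page(page):
    """Pull the four 'listLong' cells in document order, like _parse_page()"""
    data, pos = {}, 0
    for key, end in (("manga", "</td>"), ("artist", "</td>"),
                     ("total", " "), ("origin", "</td>")):
        data[key], pos = extract(page, '<td class="listLong">', end, pos)
    data["manga"] = html.unescape(data["manga"])
    data["total"] = int(data["total"])
    data["artist"] = remove_html(data["artist"])
    data["origin"] = remove_html(data["origin"])
    return data


def image_urls(page, base="http://www.hbrowse.com/data/10363/c00000/"):
    """Cut the JS image array off before its '"zzz"' sentinel and parse
    the rest as JSON, mirroring get_image_urls()"""
    json_data = extract(page, ';list = ', ',"zzz"')[0] + "]"
    return [base + name for name in json.loads(json_data)]


print(parse_page(SAMPLE_INFO))
# {'manga': 'Some Manga & More', 'artist': 'Some Artist',
#  'total': 12, 'origin': 'japan'}
print(image_urls(SAMPLE_SCRIPT))

The sentinel trick works because hbrowse terminates its JavaScript image
array with a dummy "zzz" entry; cutting the string just before that entry
and re-appending ']' turns the fragment back into parseable JSON.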