# mirror of https://github.com/mikf/gallery-dl.git
# gallery-dl/gallery_dl/extractor/hitomi.py
# -*- coding: utf-8 -*-
# Copyright 2015 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
"""Extract images from https://hitomi.la/"""
from .common import Extractor, Message
from .. import text, iso639_1
import string
class HitomiExtractor(Extractor):
    """Extractor for image galleries from https://hitomi.la/"""
    category = "hitomi"
    directory_fmt = ["{category}", "{gallery-id} {title}"]
    filename_fmt = "{category}_{gallery-id}_{num:>03}_{name}.{extension}"
    pattern = [r"(?:https?://)?hitomi\.la/(?:galleries|reader)/(\d+)\.html"]

    def __init__(self, match):
        Extractor.__init__(self)
        # gallery-id captured from the matched /galleries/ or /reader/ URL
        self.gid = match.group(1)

    def items(self):
        """Yield a Directory message followed by one Url message per image."""
        page = self.request(
            "https://hitomi.la/galleries/" + self.gid + ".html").text
        data = self.get_job_metadata(page)
        images = self.get_image_urls(page)
        data["count"] = len(images)
        yield Message.Version, 1
        yield Message.Directory, data
        for num, url in enumerate(images, 1):
            data["num"] = num
            yield Message.Url, url, text.nameext_from_url(url, data)

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        # group/type/series are optional page sections; default to ""
        # when the corresponding markup is absent
        group = ""
        gtype = ""
        series = ""
        _     , pos = text.extract(page, '<h1><a href="/reader/', '')
        title , pos = text.extract(page, '.html">', "</a>", pos)
        _     , pos = text.extract(page, '<li><a href="/artist/', '', pos)
        artist, pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<li><a href="/group/', '', pos)
        if test is not None:
            group , pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<a href="/type/', '', pos)
        if test is not None:
            gtype , pos = text.extract(page, '.html">', '</a>', pos)
        # BUGFIX: marker was garbled as '<tdLanguage</td>' — an opening
        # '<td' without '>' cannot occur in the page markup
        _     , pos = text.extract(page, '<td>Language</td>', '', pos)
        lang  , pos = text.extract(page, '.html">', '</a>', pos)
        test  , pos = text.extract(page, '<a href="/series/', '', pos)
        if test is not None:
            series, pos = text.extract(page, '.html">', '</a>', pos)
        # NOTE(review): raises AttributeError if the language marker is
        # missing (lang would be None) — matches original behavior
        lang = lang.capitalize()
        return {
            "category": self.category,
            "gallery-id": self.gid,
            "title": title,
            "artist": string.capwords(artist),
            "group": string.capwords(group),
            # [1:-1] drops the first and last character of the extracted
            # type string — presumably surrounding brackets; TODO confirm
            # against current page markup
            "type": gtype[1:-1].capitalize(),
            "lang": iso639_1.language_to_code(lang),
            "language": lang,
            "series": string.capwords(series),
        }

    @staticmethod
    def get_image_urls(page):
        """Extract and return a list of all image-urls"""
        pos = 0
        images = []
        while True:
            # thumbnail URLs embed the path fragment reused for the
            # full-size image on g.hitomi.la
            urlpart, pos = text.extract(
                page, "'//tn.hitomi.la/smalltn/", ".jpg',", pos)
            if not urlpart:
                return images
            images.append("https://g.hitomi.la/galleries/" + urlpart)