# -*- coding: utf-8 -*-

# Copyright 2014-2016 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from galleries at https://exhentai.org/"""

from .common import Extractor, Message
from .. import config, text, iso639_1, exception
from ..cache import cache
import time
import random
import requests


class ExhentaiGalleryExtractor(Extractor):
    """Extractor for image-galleries from exhentai.org"""
    category = "exhentai"
    subcategory = "gallery"
    directory_fmt = ["{category}", "{gallery-id}"]
    filename_fmt = "{gallery-id}_{num:>04}_{image-token}_{name}.{extension}"
    pattern = [r"(?:https?://)?(?:g\.e-|ex)hentai\.org/g/(\d+)/([\da-f]{10})"]
    test = [
        ("https://exhentai.org/g/960460/4f0e369d82/", {
            "keyword": "623f8c86c9fe38e964682dd4309b96922655b900",
            "content": "493d759de534355c9f55f8e365565b62411de146",
        }),
        ("https://exhentai.org/g/960461/4f0e369d82/", {
            "exception": exception.NotFoundError,
        }),
        ("http://exhentai.org/g/962698/7f02358e00/", {
            "exception": exception.AuthorizationError,
        }),
    ]
    api_url = "https://exhentai.org/api.php"

    def __init__(self, match):
        Extractor.__init__(self)
        self.key = {}
        self.count = 0
        self.gid, self.token = match.groups()
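        # options from the "extractor.exhentai" config section, e.g.
        # {"download-original": true, "wait-min": 3, "wait-max": 6}
        # (illustrative values; these are the defaults used below)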
        self.original = config.interpolate(
            ("extractor", "exhentai", "download-original"), True)
        self.wait_min = config.interpolate(
            ("extractor", "exhentai", "wait-min"), 3)
        self.wait_max = config.interpolate(
            ("extractor", "exhentai", "wait-max"), 6)
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min

    def items(self):
        self.login()
        yield Message.Version, 1
        yield Message.Headers, self.setup_headers()
        yield Message.Cookies, self.session.cookies
        url = "https://exhentai.org/g/{}/{}/".format(self.gid, self.token)
        response = self.session.get(url)
        page = response.text
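        # an inaccessible gallery is served as a 404 page containing
        # "Gallery Not Available"; removed or unknown galleries return a
        # BOM-prefixed plain-text error instead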
        if response.status_code == 404 and "Gallery Not Available" in page:
            raise exception.AuthorizationError()
        if page.startswith(("\ufeffKey missing", "\ufeffGallery not found")):
            raise exception.NotFoundError("gallery")
        data = self.get_job_metadata(page)
        self.count = int(data["count"])
        yield Message.Directory, data
        for url, image in self.get_images(page):
            data.update(image)
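            # '/fullimg.php' URLs point to the original file; its extension
            # may differ from the displayed image's, so clear it and wait a
            # little before the download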
            if "/fullimg.php" in url:
                data["extension"] = ""
                self.wait((1, 2))
            yield Message.Url, url, data

    def setup_headers(self):
        """Initialize headers"""
        self.session.headers.update({
            "User-Agent": "Mozilla/5.0",
            "Accept": "text/html,application/xhtml+xml,"
                      "application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.5",
            "Referer": "https://exhentai.org/",
        })
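        # return a copy with an image-specific Accept header; these headers
        # are passed on via Message.Headers for the image downloads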
        headers = self.session.headers.copy()
        headers["Accept"] = "image/png,image/*;q=0.8,*/*;q=0.5"
        return headers

    def get_job_metadata(self, page):
        """Collect metadata for extractor-job"""
        data = {
            "gallery-id"   : self.gid,
            "gallery-token": self.token,
        }
        text.extract_all(page, (
            ("title"     , '<h1 id="gn">', '</h1>'),
            ("title_jp"  , '<h1 id="gj">', '</h1>'),
            ("date"      , '>Posted:</td><td class="gdt2">', '</td>'),
            ("language"  , '>Language:</td><td class="gdt2">', ' '),
            ("size"      , '>File Size:</td><td class="gdt2">', ' '),
            ("size-units", '', '<'),
            ("count"     , '>Length:</td><td class="gdt2">', ' '),
        ), values=data)
        data["lang"] = iso639_1.language_to_code(data["language"])
        data["title"] = text.unescape(data["title"])
        data["title_jp"] = text.unescape(data["title_jp"])
        return data

    def get_images(self, page):
        """Collect url and metadata for all images in this gallery"""
        url = "https://exhentai.org/s/" + text.extract(
            page, 'hentai.org/s/', '"')[0]
        yield self.image_from_page(url)
        yield from self.images_from_api()

    def image_from_page(self, url):
        """Get image url and data from webpage"""
        self.wait()
        page = self.request(url).text
        data = text.extract_all(page, (
            (None      , '<div id="i3"><a onclick="return load_image(', ''),
            ("nextkey" , "'", "'"),
            ("url"     , '<img id="img" src="', '"'),
            ("origurl" , 'https://exhentai.org/fullimg.php', '"'),
            ("startkey", 'var startkey="', '";'),
            ("showkey" , 'var showkey="', '";'),
        ))[0]
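        # showkey and nextkey seed the 'showpage' API requests in
        # images_from_api(); startkey doubles as the first image's token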
self.key["start"] = data["startkey"]
self.key["show" ] = data["showkey"]
self.key["next" ] = data["nextkey"]
url = ("https://exhentai.org/fullimg.php" + text.unescape(data["origurl"])
if self.original and data["origurl"] else data["url"])
return url, text.nameext_from_url(data["url"], {
"num": 1,
"image-token": data["startkey"],
})
2016-09-20 19:01:16 +02:00
def images_from_api(self):
"""Get image url and data from api calls"""
nextkey = self.key["next" ]
2014-10-12 21:56:44 +02:00
request = {
"method" : "showpage",
2016-09-19 16:13:26 +02:00
"gid" : int(self.gid),
2016-09-20 19:01:16 +02:00
"imgkey" : nextkey,
"showkey": self.key["show"],
2014-10-12 21:56:44 +02:00
}
for request["page"] in range(2, self.count+1):
            while True:
                try:
                    self.wait()
                    page = self.session.post(
                        self.api_url, json=request).json()
                    break
                except requests.exceptions.ConnectionError:
                    pass
            imgkey = nextkey
            nextkey, pos = text.extract(page["i3"], "'", "'")
            imgurl , pos = text.extract(page["i3"], '<img id="img" src="',
                                        '"', pos)
            origurl, pos = text.extract(page["i7"], '<a href="', '"')
            url = (text.unescape(origurl)
                   if self.original and origurl else imgurl)
            yield url, text.nameext_from_url(imgurl, {
                "num": request["page"],
                "image-token": imgkey,
            })
            request["imgkey"] = nextkey

    def wait(self, waittime=None):
        """Wait for a randomly chosen amount of seconds"""
        if not waittime:
            waittime = random.uniform(self.wait_min, self.wait_max)
        else:
            waittime = random.uniform(*waittime)
        time.sleep(waittime)

    def login(self):
        """Login and set necessary cookies"""
        cookies = self._login_impl()
        for key, value in cookies.items():
            self.session.cookies.set(
                key, value, domain=".exhentai.org", path="/")

    @cache(maxage=360*24*60*60)
    def _login_impl(self):
        """Actual login implementation"""
        cnames = ["ipb_member_id", "ipb_pass_hash"]
        try:
            cookies = config.get(("extractor", "exhentai", "cookies"))
            if isinstance(cookies, dict) and all(c in cookies for c in cnames):
                return cookies
        except TypeError:
            pass
        url = "https://forums.e-hentai.org/index.php?act=Login&CODE=01"
        params = {
            "CookieDate": "1",
            "b": "d",
            "bt": "1-1",
            "UserName": config.interpolate(
                ("extractor", "exhentai", "username")),
            "PassWord": config.interpolate(
                ("extractor", "exhentai", "password")),
            "ipb_login_submit": "Login!",
        }
        self.session.headers["Referer"] = (
            "http://e-hentai.org/bounce_login.php?b=d&bt=1-1")
        response = self.session.post(url, data=params)
        if "You are now logged in as:" not in response.text:
            raise exception.AuthenticationError()
        return {c: response.cookies[c] for c in cnames}