# -*- coding: utf-8 -*-

# Copyright 2014-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from galleries at https://exhentai.org/"""

from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache
import itertools
import random
import time
import math
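

# matches exhentai.org, e-hentai.org, and g.e-hentai.org URLs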
BASE_PATTERN = r"(?:https?://)?(e[x-]|g\.e-)hentai\.org"


class ExhentaiExtractor(Extractor):
    """Base class for exhentai extractors"""
    category = "exhentai"
    directory_fmt = ("{category}", "{gallery_id}")
    filename_fmt = "{gallery_id}_{num:>04}_{image_token}_{name}.{extension}"
    archive_fmt = "{gallery_id}_{num}"
    cookiedomain = ".exhentai.org"
    cookienames = ("ipb_member_id", "ipb_pass_hash")
    root = "https://exhentai.org"

    def __init__(self):
        Extractor.__init__(self)
        self.limits = self.config("limits", True)
        self.original = self.config("original", True)
        self.wait_min = self.config("wait-min", 3)
        self.wait_max = self.config("wait-max", 6)

        self._remaining = 0
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min
        self.session.headers["Referer"] = self.root + "/"

    def request(self, *args, **kwargs):
        response = Extractor.request(self, *args, **kwargs)
        if self._is_sadpanda(response):
            self.log.info("sadpanda.jpg")
            raise exception.AuthorizationError()
        return response

    def wait(self, waittime=None):
        """Wait for a randomly chosen amount of seconds"""
        if not waittime:
            waittime = random.uniform(self.wait_min, self.wait_max)
        else:
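            # scale an explicitly requested wait time by a random
            # factor between 0.66 and 1.33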
            waittime = random.uniform(waittime * 0.66, waittime * 1.33)
        time.sleep(waittime)

    def login(self):
        """Login and set necessary cookies"""
        if self._check_cookies(self.cookienames):
            return
        username, password = self._get_auth_info()
        if username:
            self._update_cookies(self._login_impl(username, password))
        else:
            self.log.info("no username given; using e-hentai.org")
            self.root = "https://e-hentai.org"
            self.original = False
            self.limits = False
            self.session.cookies["nw"] = "1"
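
    # cache the cookies of a successful login for 90 days, keyed by username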
    @cache(maxage=90*24*60*60, keyarg=1)
    def _login_impl(self, username, password):
        self.log.info("Logging in as %s", username)
        url = "https://forums.e-hentai.org/index.php?act=Login&CODE=01"
        headers = {
            "Referer": "https://e-hentai.org/bounce_login.php?b=d&bt=1-1",
        }
        data = {
            "CookieDate": "1",
            "b": "d",
            "bt": "1-1",
            "UserName": username,
            "PassWord": password,
            "ipb_login_submit": "Login!",
        }

        response = self.request(url, method="POST", headers=headers, data=data)
        if "You are now logged in as:" not in response.text:
            raise exception.AuthenticationError()
        return {c: response.cookies[c] for c in self.cookienames}

    @staticmethod
    def _is_sadpanda(response):
        """Return True if the response object contains a sad panda"""
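        # the placeholder image is exactly 9615 bytes long and is served
        # with 'sadpanda.jpg' in its Content-Disposition header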
        return (
            response.headers.get("Content-Length") == "9615" and
            "sadpanda.jpg" in response.headers.get("Content-Disposition", "")
        )


class ExhentaiGalleryExtractor(ExhentaiExtractor):
    """Extractor for image galleries from exhentai.org"""
    subcategory = "gallery"
    pattern = (BASE_PATTERN +
               r"(?:/g/(\d+)/([\da-f]{10})"
               r"|/s/([\da-f]{10})/(\d+)-(\d+))")
    test = (
        ("https://exhentai.org/g/960460/4f0e369d82/", {
            "keyword": "ba0785e49e3877cfa3f91c1ad9a5ac7816339bf5",
            "content": "493d759de534355c9f55f8e365565b62411de146",
        }),
        ("https://exhentai.org/g/960461/4f0e369d82/", {
            "exception": exception.NotFoundError,
        }),
        ("http://exhentai.org/g/962698/7f02358e00/", {
            "exception": exception.AuthorizationError,
        }),
        ("https://exhentai.org/s/3957343c3b/960460-5", {
            "count": 2,
        }),
        ("https://e-hentai.org/g/960460/4f0e369d82/"),
        ("https://g.e-hentai.org/g/960460/4f0e369d82/"),
    )

    def __init__(self, match):
        ExhentaiExtractor.__init__(self)
        self.key = {}
        self.count = 0
        self.gallery_id = text.parse_int(match.group(2) or match.group(5))
        self.gallery_token = match.group(3)
        self.image_token = match.group(4)
        self.image_num = text.parse_int(match.group(6), 1)

    def items(self):
        self.login()
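
        # a '/g/' URL yields a gallery page, a '/s/' URL a single image
        # page; fetch whichever one the input URL did not point to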
        if self.gallery_token:
            gpage = self._gallery_page()
            self.image_token = text.extract(gpage, 'hentai.org/s/', '"')[0]
            self.wait()
            ipage = self._image_page()
        else:
            ipage = self._image_page()
            part = text.extract(ipage, 'hentai.org/g/', '"')[0]
            self.gallery_token = part.split("/")[1]
            self.wait()
            gpage = self._gallery_page()

        data = self.get_metadata(gpage)
        self.count = data["count"]

        yield Message.Version, 1
        yield Message.Directory, data

        images = itertools.chain(
            (self.image_from_page(ipage),), self.images_from_api())
        for url, image in images:
            data.update(image)
            if self.limits:
                self._check_limits(data)
            if "/fullimg.php" in url:
                data["extension"] = ""
                self.wait(1.5)
            yield Message.Url, url, data

    def get_metadata(self, page):
        """Extract gallery metadata"""
        data, pos = text.extract_all(page, (
            ("title"       , '<h1 id="gn">', '</h1>'),
            ("title_jp"    , '<h1 id="gj">', '</h1>'),
            ("date"        , '>Posted:</td><td class="gdt2">', '</td>'),
            ("parent"      , '>Parent:</td><td class="gdt2"><a href="', '"'),
            ("visible"     , '>Visible:</td><td class="gdt2">', '<'),
            ("language"    , '>Language:</td><td class="gdt2">', ' '),
            ("gallery_size", '>File Size:</td><td class="gdt2">', '<'),
            ("count"       , '>Length:</td><td class="gdt2">', ' '),
        ))

        data["lang"] = util.language_to_code(data["language"])
        data["title"] = text.unescape(data["title"])
        data["title_jp"] = text.unescape(data["title_jp"])
        data["count"] = text.parse_int(data["count"])
        data["gallery_id"] = self.gallery_id
        data["gallery_token"] = self.gallery_token
        data["gallery_size"] = text.parse_bytes(
            data["gallery_size"].rstrip("Bb"))
        data["tags"] = [
            text.unquote(tag)
            for tag in text.extract_iter(page, 'hentai.org/tag/', '"', pos)
        ]
        return data

    def image_from_page(self, page):
        """Get image url and data from webpage"""
        info = text.extract_all(page, (
            (None      , '<div id="i3"><a onclick="return load_image(', ''),
            ("nextkey" , "'", "'"),
            ("url"     , '<img id="img" src="', '"'),
            ("origurl" , 'hentai.org/fullimg.php', '"'),
            ("originfo", 'ownload original', '<'),
            ("startkey", 'var startkey="', '";'),
            ("showkey" , 'var showkey="', '";'),
        ))[0]
        self.key["start"] = info["startkey"]
        self.key["show"] = info["showkey"]
        self.key["next"] = info["nextkey"]

        if self.original and info["origurl"]:
            part = text.unescape(info["origurl"])
            url = self.root + "/fullimg.php" + part
            data = self._parse_original_info(info["originfo"])
        else:
            url = info["url"]
            data = self._parse_image_info(url)

        data["num"] = self.image_num
        data["image_token"] = info["startkey"]
        return url, text.nameext_from_url(info["url"], data)

    def images_from_api(self):
        """Get image url and data from api calls"""
        api_url = self.root + "/api.php"
        nextkey = self.key["next"]
        request = {
            "method" : "showpage",
            "gid"    : self.gallery_id,
            "imgkey" : nextkey,
            "showkey": self.key["show"],
        }
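        # each 'showpage' API call returns HTML fragments of one image page:
        # 'i3' holds the image itself, 'i7' the download-original link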
        for request["page"] in range(self.image_num + 1, self.count + 1):
            self.wait()
            page = self.request(api_url, method="POST", json=request).json()
            imgkey = nextkey
            nextkey, pos = text.extract(page["i3"], "'", "'")
            imgurl , pos = text.extract(page["i3"], 'id="img" src="', '"', pos)
            origurl, pos = text.extract(page["i7"], '<a href="', '"')

            if self.original and origurl:
                url = text.unescape(origurl)
                data = self._parse_original_info(
                    text.extract(page["i7"], "ownload original", "<", pos)[0])
            else:
                url = imgurl
                data = self._parse_image_info(url)

            data["num"] = request["page"]
            data["image_token"] = imgkey
            yield url, text.nameext_from_url(imgurl, data)

            request["imgkey"] = nextkey

    def _gallery_page(self):
        url = "{}/g/{}/{}/".format(
            self.root, self.gallery_id, self.gallery_token)
        response = self.request(url, expect=range(400, 500))
        page = response.text

        if response.status_code == 404 and "Gallery Not Available" in page:
            raise exception.AuthorizationError()
        if page.startswith(("Key missing", "Gallery not found")):
            raise exception.NotFoundError("gallery")
        return page

    def _image_page(self):
        url = "{}/s/{}/{}-{}".format(
            self.root, self.image_token, self.gallery_id, self.image_num)
        page = self.request(url, expect=range(400, 500)).text

        if page.startswith(("Invalid page", "Keep trying")):
            raise exception.NotFoundError("image page")
        return page

    def _check_limits(self, data):
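        # query the remaining image allowance for the first image
        # and refresh it every 20 images after that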
        if not self._remaining or data["num"] % 20 == 0:
            self._update_limits()
        self._remaining -= data["cost"]

        if self._remaining <= 0:
            url = "{}/s/{}/{}-{}".format(
                self.root, data["image_token"], self.gallery_id, data["num"])
            self.log.error(
                "Image limit reached! Reset it and continue with "
                "'%s' as URL.", url)
            raise exception.StopExtraction()

    def _update_limits(self):
        url = "https://e-hentai.org/home.php"
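        # forward the session cookies, but drop 'igneous'
        # (the exhentai-specific one) for this e-hentai.org request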
        cookies = {
            cookie.name: cookie.value
            for cookie in self.session.cookies
            if cookie.domain == self.cookiedomain and cookie.name != "igneous"
        }

        page = self.request(url, cookies=cookies).text
        current, pos = text.extract(page, "<strong>", "</strong>")
        maximum, pos = text.extract(page, "<strong>", "</strong>", pos)
        self._remaining = text.parse_int(maximum) - text.parse_int(current)

    @staticmethod
    def _parse_image_info(url):
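        # the image URL's fifth path segment has the form
        # '<hash>-<size>-<width>-<height>-<type>'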
        parts = url.split("/")[4].split("-")
        return {
            "width": text.parse_int(parts[2]),
            "height": text.parse_int(parts[3]),
            "size": text.parse_int(parts[1]),
            "cost": 1,
        }

    @staticmethod
    def _parse_original_info(info):
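        # 'info' is the text after a 'Download original' link and has
        # the form ' <width> x <height> <value> <unit>'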
        parts = info.lstrip().split(" ")
        size = text.parse_bytes(parts[3] + parts[4][0])
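        # an original image costs 1 point, plus 5 points per MiB (rounded up)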
        return {
            "width": text.parse_int(parts[0]),
            "height": text.parse_int(parts[2]),
            "size": size,
            "cost": 1 + math.ceil(size * 5 / 1024 / 1024)
        }


class ExhentaiSearchExtractor(ExhentaiExtractor):
    """Extractor for exhentai search results"""
    subcategory = "search"
    pattern = BASE_PATTERN + r"/?\?(.*)$"
    test = (
        ("https://exhentai.org/?f_search=touhou"),
        (("https://exhentai.org/?f_doujinshi=0&f_manga=0&f_artistcg=0"
          "&f_gamecg=0&f_western=0&f_non-h=1&f_imageset=0&f_cosplay=0"
          "&f_asianporn=0&f_misc=0&f_search=touhou&f_apply=Apply+Filter"), {
            "pattern": ExhentaiGalleryExtractor.pattern,
            "range": "1-30",
            "count": 30,
        }),
    )

    def __init__(self, match):
        ExhentaiExtractor.__init__(self)
        self.params = text.parse_query(match.group(1) or "")
        self.params["page"] = text.parse_int(self.params.get("page"))
        self.url = self.root

    def items(self):
        self.login()
        self.init()
        yield Message.Version, 1

        while True:
            page = self.request(self.url, params=self.params).text

            for row in text.extract_iter(page, '<tr class="gtr', '</tr>'):
                yield self._parse_row(row)
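
            # stop paginating on a disabled next-page arrow ('>')
            # or when the search returned no hits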
            if 'class="ptdd">><' in page or ">No hits found</p>" in page:
                return
            self.params["page"] += 1
            self.wait()

    def init(self):
        pass

    def _parse_row(self, row, extr=text.extract):
        """Parse information of a single result row"""
        gtype, pos = extr(row, ' alt="', '"')
        date , pos = extr(row, 'nowrap">', '<', pos)
        url  , pos = extr(row, ' class="it5"><a href="', '"', pos)
        title, pos = extr(row, '>', '<', pos)
        key  , last = self._parse_last(row, pos)
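        # result URLs end in '/g/<gallery-id>/<gallery-token>/'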
        parts = url.rsplit("/", 3)

        return Message.Queue, url, {
            "type": gtype,
            "date": date,
            "gallery_id": text.parse_int(parts[1]),
            "gallery_token": parts[2],
            "title": text.unescape(title),
            key: last,
        }

    def _parse_last(self, row, pos):
        """Parse the last column of a result row"""
        return "uploader", text.remove_html(
            text.extract(row, '<td class="itu">', '</td>', pos)[0])


class ExhentaiFavoriteExtractor(ExhentaiSearchExtractor):
    """Extractor for favorited exhentai galleries"""
    subcategory = "favorite"
    pattern = BASE_PATTERN + r"/favorites\.php(?:\?(.*))?"
    test = (
        ("https://exhentai.org/favorites.php"),
        ("https://exhentai.org/favorites.php?favcat=1&f_search=touhou"
         "&f_apply=Search+Favorites"),
    )

    def __init__(self, match):
        ExhentaiSearchExtractor.__init__(self, match)
        self.url = self.root + "/favorites.php"

    def init(self):
        # The first request to '/favorites.php' will return an empty list
        # if the 's' cookie isn't set (maybe on some other conditions as well),
        # so we make a "noop" request to get all the correct cookie values
        # and to get a filled favorite list on the next one.
        # TODO: proper cookie storage
        self.request(self.url)
        self.wait(1.5)

    def _parse_last(self, row, pos):
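        # favorites lists show the date an entry was favorited
        # in their last column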
        return "date_favorited", text.extract(row, 'nowrap">', '<', pos)[0]