1
0
mirror of https://github.com/mikf/gallery-dl.git synced 2024-11-22 02:32:33 +01:00

[tsumino] add login capabilities (#161)

This commit is contained in:
Mike Fährmann 2019-01-30 17:58:48 +01:00
parent dd358b4564
commit bfbbac4495
No known key found for this signature in database
GPG Key ID: 5680CA389D365A88
5 changed files with 30 additions and 6 deletions

View File

@ -156,7 +156,7 @@ Some extractors require you to provide valid login-credentials in the form of
a username & password pair.
This is necessary for ``pixiv``, ``nijie`` and ``seiga``
and optional (but strongly recommended) for ``exhentai``, ``luscious``,
``sankaku``, ``idolcomplex`` and ``wallhaven``.
``sankaku``, ``idolcomplex``, ``tsumino`` and ``wallhaven``.
You can set the necessary information in your configuration file
(cf. gallery-dl.conf_)

View File

@ -83,7 +83,7 @@ Simply Hentai https://www.simply-hentai.com/ Galleries, individual I
SlideShare https://www.slideshare.net/ Presentations
SmugMug https://www.smugmug.com/ |Capabilities-8| Optional (OAuth)
The /b/ Archive https://thebarchive.com/ Threads
Tsumino https://www.tsumino.com/ Galleries
Tsumino https://www.tsumino.com/ Galleries Optional
Tumblr https://www.tumblr.com/ Images from Users, Likes, Posts, Tag-Searches Optional (OAuth)
Twitter https://twitter.com/ Media Timelines, Timelines, Tweets
Wallhaven https://alpha.wallhaven.cc/ individual Images, Search Results Optional

View File

@ -223,6 +223,7 @@ class ChapterExtractor(Extractor):
self.url = url
def items(self):
self.login()
page = self.request(self.url).text
data = self.get_metadata(page)
imgs = self.get_images(page)
@ -230,7 +231,7 @@ class ChapterExtractor(Extractor):
if "count" in data:
images = zip(
range(1, data["count"]+1),
imgs
imgs,
)
else:
try:
@ -246,6 +247,9 @@ class ChapterExtractor(Extractor):
data.update(imgdata)
yield Message.Url, url, text.nameext_from_url(url, data)
def login(self):
    """Log in and set any cookies the site requires (no-op by default).

    Subclasses that need authentication override this hook; the base
    implementation does nothing and returns None.
    """
def get_metadata(self, page):
    """Return a dict with general metadata.

    Subclass hook: the base implementation has no body and therefore
    returns None; extractors override it to parse *page*.
    """

View File

@ -9,7 +9,8 @@
"""Extractors for https://www.tsumino.com/"""
from .common import ChapterExtractor
from .. import text
from .. import text, exception
from ..cache import cache
class TsuminoGalleryExtractor(ChapterExtractor):
@ -19,6 +20,7 @@ class TsuminoGalleryExtractor(ChapterExtractor):
filename_fmt = "{category}_{gallery_id}_{page:>03}.{extension}"
directory_fmt = ["{category}", "{gallery_id} {title}"]
archive_fmt = "{gallery_id}_{page}"
cookiedomain = "www.tsumino.com"
pattern = [r"(?i)(?:https?://)?(?:www\.)?tsumino\.com"
r"/(?:Book/Info|Read/View)/(\d+)"]
test = [
@ -35,8 +37,25 @@ class TsuminoGalleryExtractor(ChapterExtractor):
url = "{}/Book/Info/{}".format(self.root, self.gallery_id)
ChapterExtractor.__init__(self, url)
self.session.cookies.setdefault(
"ASP.NET_SessionId", "x1drgggilez4cpkttneukrc5")
def login(self):
    """Authenticate with configured credentials, or fall back to a
    fixed anonymous session cookie when none are set."""
    username, password = self._get_auth_info()
    if not username:
        # no credentials configured: reuse a shared guest session id
        self.session.cookies.setdefault(
            "ASP.NET_SessionId", "x1drgggilez4cpkttneukrc5")
        return
    self._update_cookies(self._login_impl(username, password))
@cache(maxage=14*24*60*60, keyarg=1)
def _login_impl(self, username, password):
    """Submit the login form and return the auth cookie as a dict.

    Cached for 14 days per username (keyarg=1). Raises
    exception.AuthenticationError when the POST does not result in a
    redirect, which is how the site signals rejected credentials.
    """
    self.log.info("Logging in as %s", username)
    login_url = "{}/Account/Login".format(self.root)
    response = self.request(
        login_url,
        method="POST",
        headers={"Referer": login_url},
        data={"Username": username, "Password": password},
    )
    # a successful login redirects; an empty history means failure
    if not response.history:
        raise exception.AuthenticationError()
    auth_cookie = response.history[0].cookies[".aotsumino"]
    return {".aotsumino": auth_cookie}
def get_metadata(self, page):
extr = text.extract

View File

@ -100,6 +100,7 @@ AUTH_MAP = {
"sankaku" : "Optional",
"seiga" : "Required",
"smugmug" : "Optional (OAuth)",
"tsumino" : "Optional",
"tumblr" : "Optional (OAuth)",
"wallhaven" : "Optional",
}