# -*- coding: utf-8 -*-

# Copyright 2014-2018 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://chan.sankakucomplex.com/"""

from .common import SharedConfigExtractor, Message
from .. import text, util, exception
from ..cache import cache
import collections
import random
import time
import re


class SankakuExtractor(SharedConfigExtractor):
    """Base class for sankaku extractors"""
    basecategory = "booru"
    category = "sankaku"
    filename_fmt = "{category}_{id}_{md5}.{extension}"
    cookienames = ("login", "pass_hash")
    cookiedomain = "chan.sankakucomplex.com"
    subdomain = "chan"

    def __init__(self):
        SharedConfigExtractor.__init__(self)
        self.root = "https://" + self.cookiedomain
        self.logged_in = True
        self.start_page = 1
        self.start_post = 0
        self.extags = self.config("tags", False)
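        # Lower/upper bounds (in seconds) for the random delay that
        # wait() inserts between consecutive HTTP requests.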
        self.wait_min = self.config("wait-min", 3.0)
        self.wait_max = self.config("wait-max", 6.0)
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min

    def items(self):
        self.login()
        data = self.get_metadata()

        yield Message.Version, 1
        yield Message.Directory, data
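
        # The first 'start_post' ids were already handled (see skip());
        # util.advance() drops that many items from the generator.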
        for post_id in util.advance(self.get_posts(), self.start_post):
            self.wait()
            post = self.get_post_data(post_id)
            url = post["file_url"]
            post.update(data)
            yield Message.Url, url, text.nameext_from_url(url, post)

    def skip(self, num):
        self.start_post += num
        return num

    def get_metadata(self):
        """Return general metadata"""
        return {}

    def get_posts(self):
        """Return an iterable containing all relevant post ids"""

    def get_post_data(self, post_id, extr=text.extract):
        """Extract metadata of a single post"""
        url = self.root + "/post/show/" + post_id
        page = self.request(url, retries=10).text

        tags   , pos = extr(page, "<title>", " | ")
        vavg   , pos = extr(page, "itemprop=ratingValue>", "<", pos)
        vcnt   , pos = extr(page, "itemprop=reviewCount>", "<", pos)
        _      , pos = extr(page, "Posted: <", "", pos)
        created, pos = extr(page, ' title="', '"', pos)
        rating = extr(page, "<li>Rating: ", "<", pos)[0]

        file_url, pos = extr(page, '<li>Original: <a href="', '"', pos)
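        # Image posts expose an "Original:" link; for other media
        # (e.g. flash) the file is embedded via <object>/<embed>.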
        if file_url:
            width , pos = extr(page, '>', 'x', pos)
            height, pos = extr(page, '', ' ', pos)
        else:
            width , pos = extr(page, '<object width=', ' ', pos)
            height, pos = extr(page, 'height=', '>', pos)
            file_url = extr(page, '<embed src="', '"', pos)[0]

        data = {
            "id": text.parse_int(post_id),
            "md5": file_url.rpartition("/")[2].partition(".")[0],
            "tags": tags,
            "vote_average": float(vavg or 0),
            "vote_count": text.parse_int(vcnt),
            "created_at": created,
            "rating": (rating or "?")[0].lower(),
            "file_url": "https:" + text.unescape(file_url),
            "width": text.parse_int(width),
            "height": text.parse_int(height),
        }
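
        # With the 'tags' option enabled, split the tag sidebar into
        # per-type keys (tags_artist, tags_copyright, ...).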
        if self.extags:
            tags = collections.defaultdict(list)
            tags_html = text.extract(page, '<ul id=tag-sidebar>', '</ul>')[0]
            pattern = re.compile(r'tag-type-([^>]+)><a href="/\?tags=([^"]+)')
            # text.extract() yields None if the sidebar is missing
            for tag_type, tag_name in pattern.findall(tags_html or ""):
                tags[tag_type].append(text.unquote(tag_name))
            for key, value in tags.items():
                data["tags_" + key] = " ".join(value)

        return data

    def wait(self):
        """Wait for a randomly chosen amount of seconds"""
        time.sleep(random.uniform(self.wait_min, self.wait_max))

    def login(self):
        """Login and set necessary cookies"""
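        # Nothing to do if valid login cookies are already present.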
        if self._check_cookies(self.cookienames):
            return
        username, password = self._get_auth_info()
        if username:
            cookies = self._login_impl((username, self.subdomain), password)
            for key, value in cookies.items():
                self.session.cookies.set(
                    key, value, domain=self.cookiedomain)
        else:
            self.logged_in = False

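    # Cache the returned login cookies for 90 days; keyarg=1 keys the
    # cache on 'usertuple', so each account gets its own entry.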
    @cache(maxage=90*24*60*60, keyarg=1)
    def _login_impl(self, usertuple, password):
        """Actual login implementation"""
        username = usertuple[0]
        self.log.info("Logging in as %s", username)
        params = {
            "url": "",
            "user[name]": username,
            "user[password]": password,
            "commit": "Login",
        }
        response = self.request(self.root + "/user/authenticate",
                                method="POST", params=params)
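        # A successful login redirects to /user/home; ending up anywhere
        # else means the credentials were rejected.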
        if not response.history or response.url != self.root + "/user/home":
            raise exception.AuthenticationError()
        cookies = response.history[0].cookies
        return {c: cookies[c] for c in self.cookienames}


class SankakuTagExtractor(SankakuExtractor):
    """Extractor for images from chan.sankakucomplex.com by search-tags"""
    subcategory = "tag"
    directory_fmt = ["{category}", "{search_tags}"]
    archive_fmt = "t_{search_tags}_{id}"
    pattern = [r"(?:https?://)?chan\.sankakucomplex\.com/\?([^#]*)"]
    test = [
        ("https://chan.sankakucomplex.com/?tags=bonocho", {
            "count": 5,
            "pattern": (r"https://cs\.sankakucomplex\.com/data/[^/]{2}/[^/]{2}"
                        r"/[^/]{32}\.\w+\?e=\d+&m=[^&#]+"),
        }),
        # respect 'page' query parameter
        ("https://chan.sankakucomplex.com/?tags=bonocho&page=2", {
            "count": 0,
        }),
        # respect 'next' query parameter
        ("https://chan.sankakucomplex.com/?tags=bonocho&next=182284", {
            "count": 1,
        }),
        # error on five or more tags
        ("https://chan.sankakucomplex.com/?tags=bonocho+a+b+c+d", {
            "options": (("username", None),),
            "exception": exception.StopExtraction,
        }),
        # match arbitrary query parameters
        (("https://chan.sankakucomplex.com/"
          "?tags=marie_rose&page=98&next=3874906&commit=Search"), None),
    ]
    per_page = 20

    def __init__(self, match):
        SankakuExtractor.__init__(self)
        query = text.parse_query(match.group(1))
        self.tags = text.unquote(query.get("tags", "").replace("+", " "))
        self.start_page = text.parse_int(query.get("page"), 1)
        self.next = text.parse_int(query.get("next"), 0)

    def skip(self, num):
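        # With a 'next' id there is no page math to do; posts can only
        # be skipped one by one from the id-based listing.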
        if self.next:
            self.start_post += num
        else:
            pages, posts = divmod(num, self.per_page)
            self.start_page += pages
            self.start_post += posts
        return num

    def get_metadata(self):
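        # Listings are capped at 25 pages (50 when logged in); deeper
        # start pages are reached by starting at the last reachable page
        # and skipping posts instead.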
        if not self.next:
            max_page = 50 if self.logged_in else 25
            if self.start_page > max_page:
                self.log.info("Traversing from page %d to page %d",
                              max_page, self.start_page)
                self.start_post += self.per_page * (self.start_page - max_page)
                self.start_page = max_page

        tags = self.tags.split()
        if not self.logged_in and len(tags) > 4:
            self.log.error("Unauthenticated users cannot use "
                           "more than 4 tags at once.")
            raise exception.StopExtraction()
        return {"search_tags": " ".join(tags)}

    def get_posts(self):
        params = {"tags": self.tags}

        if self.next:
            params["next"] = self.next
        else:
            params["page"] = self.start_page

        while True:
            self.wait()
            page = self.request(self.root, params=params, retries=10).text
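            # Start past the "popular posts" marker so its preview
            # thumbnails aren't picked up as search results.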
            pos = page.find("<div id=more-popular-posts-link>") + 1

            ids = list(text.extract_iter(page, '" id=p', '>', pos))
            if not ids:
                return
            yield from ids
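
            # Paginate by id: 'next' selects posts with an id below the
            # given value, relative to the smallest id on this page.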
            params["page"] = 2
            params["next"] = text.parse_int(ids[-1]) - 1


class SankakuPoolExtractor(SankakuExtractor):
    """Extractor for image-pools from chan.sankakucomplex.com"""
    subcategory = "pool"
    directory_fmt = ["{category}", "pool", "{pool}"]
    archive_fmt = "p_{pool}_{id}"
    pattern = [r"(?:https?://)?chan\.sankakucomplex\.com/pool/show/(\d+)"]
    test = [("https://chan.sankakucomplex.com/pool/show/90", {
        "count": 5,
    })]
    per_page = 24

    def __init__(self, match):
        SankakuExtractor.__init__(self)
        self.pool_id = match.group(1)

    def skip(self, num):
        pages, posts = divmod(num, self.per_page)
        self.start_page += pages
        self.start_post += posts
        return num

    def get_metadata(self):
        return {"pool": self.pool_id}

    def get_posts(self):
        url = self.root + "/pool/show/" + self.pool_id
        params = {"page": self.start_page}

        while True:
            page = self.request(url, params=params, retries=10).text
            ids = list(text.extract_iter(page, '" id=p', '>'))

            yield from ids
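            # A page with fewer than per_page entries is the last one.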
            if len(ids) < self.per_page:
                return

            params["page"] += 1


class SankakuPostExtractor(SankakuExtractor):
    """Extractor for single images from chan.sankakucomplex.com"""
    subcategory = "post"
    archive_fmt = "{id}"
    pattern = [r"(?:https?://)?chan\.sankakucomplex\.com/post/show/(\d+)"]
    test = [("https://chan.sankakucomplex.com/post/show/360451", {
        "content": "5e255713cbf0a8e0801dc423563c34d896bb9229",
        "options": (("tags", True),),
        "keyword": {
            "tags_artist": "bonocho",
            "tags_copyright": "batman_(series) the_dark_knight",
            "tags_medium": "sketch copyright_name",
            "tags_studio": "dc_comics",
            "tags_character": str,
            "tags_general": str,
        },
    })]

    def __init__(self, match):
        SankakuExtractor.__init__(self)
        self.post_id = match.group(1)

    def get_posts(self):
        return (self.post_id,)