# -*- coding: utf-8 -*-

# Copyright 2019-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://fuskator.com/"""

from .common import GalleryExtractor, Extractor, Message
from .. import text
import time

class FuskatorGalleryExtractor(GalleryExtractor):
    """Extractor for a single image gallery on fuskator.com

    Resolves a gallery hash from the URL, authenticates against the
    site's AJAX endpoints, and yields direct image URLs plus metadata.
    """
    category = "fuskator"
    root = "https://fuskator.com"
    pattern = r"(?:https?://)?fuskator\.com/(?:thumbs|expanded)/([^/?#]+)"
    example = "https://fuskator.com/thumbs/ID/"

    def __init__(self, match):
        self.gallery_hash = match.group(1)
        GalleryExtractor.__init__(
            self, match,
            "{}/thumbs/{}/".format(self.root, self.gallery_hash))

    def metadata(self, page):
        """Fetch gallery info via the site's AJAX API and build metadata"""
        headers = {
            "Referer" : self.gallery_url,
            "X-Requested-With": "XMLHttpRequest",
        }

        # a POST to auth.aspx returns a one-time token required by gal.aspx
        token = self.request(
            self.root + "/ajax/auth.aspx", method="POST", headers=headers,
        ).text

        # '_' is a timestamp cache-buster, as sent by the site's own JS
        gallery = self.request(
            self.root + "/ajax/gal.aspx",
            params={
                "X-Auth": token,
                "hash" : self.gallery_hash,
                "_"     : int(time.time()),
            },
            headers=headers,
        ).json()
        self.data = gallery

        # page title looks like 'TITLE <suffix> #ID'; split off the numeric
        # ID and drop the fixed-length 15-character suffix from the title
        title = text.extr(page, "<title>", "</title>").strip()
        title, _, gallery_id = title.rpartition("#")

        return {
            "gallery_id" : text.parse_int(gallery_id),
            "gallery_hash": self.gallery_hash,
            "title"       : text.unescape(title[:-15]),
            "views"       : gallery["hits"],
            "score"       : gallery["rating"],
            "tags"        : gallery["tags"].split(","),
            "count"       : len(gallery["images"]),
        }

    def images(self, page):
        """Yield (url, metadata) pairs for all images in the gallery"""
        # image URLs in the API response are protocol-relative
        return (
            ("https:" + image["imageUrl"], image)
            for image in self.data["images"]
        )

class FuskatorSearchExtractor(Extractor):
    """Extractor for search result pages on fuskator.com

    Walks paginated search/listing pages and enqueues each found
    gallery for FuskatorGalleryExtractor.
    """
    category = "fuskator"
    subcategory = "search"
    root = "https://fuskator.com"
    pattern = r"(?:https?://)?fuskator\.com(/(?:search|page)/.+)"
    example = "https://fuskator.com/search/TAG/"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.path = match.group(1)

    def items(self):
        """Yield queue messages for every gallery in the search results"""
        url = self.root + self.path
        data = {"_extractor": FuskatorGalleryExtractor}

        while True:
            page = self.request(url).text

            # each thumbnail cell links to a gallery page
            for gallery_path in text.extract_iter(
                    page, 'class="pic_pad"><a href="', '"'):
                yield Message.Queue, self.root + gallery_path, data

            # pagination footer; its last link points to the next page
            pagination = text.extr(page, 'class="pages"><span>', '>>><')
            if not pagination:
                return
            url = self.root + text.rextract(pagination, 'href="', '"')[0]