# -*- coding: utf-8 -*-

# Copyright 2019-2020 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Generic extractors for *reactor sites"""

from .common import Extractor, Message
from .. import text
import urllib.parse
import random
import time
import json


BASE_PATTERN = r"(?:https?://)?((?:[^/.]+\.)?reactor\.cc)"
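# matches reactor.cc itself as well as subdomains like anime.reactor.cc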


class ReactorExtractor(Extractor):
    """Base class for *reactor.cc extractors"""
    basecategory = "reactor"
    filename_fmt = "{post_id}_{num:>02}{title[:100]:?_//}.{extension}"
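    # e.g. "3576250_01_Title.jpg"; the "_Title" part is dropped
    # when a post has no title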
    archive_fmt = "{post_id}_{num}"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.root = "http://" + match.group(1)
        self.session.headers["Referer"] = self.root
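
        # bounds for the randomized delay between requests
        # in _pagination(); configurable via "wait-min"/"wait-max"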
        self.wait_min = self.config("wait-min", 3)
        self.wait_max = self.config("wait-max", 6)
        if self.wait_max < self.wait_min:
            self.wait_max = self.wait_min

        if not self.category:
            # set category based on domain name
            netloc = urllib.parse.urlsplit(self.root).netloc
            self.category = netloc.rpartition(".")[0]

    def items(self):
        data = self.metadata()
        yield Message.Version, 1
        yield Message.Directory, data
        for post in self.posts():
            for image in self._parse_post(post):
                url = image["url"]
                image.update(data)
                yield Message.Url, url, text.nameext_from_url(url, image)

    def metadata(self):
        """Collect metadata for extractor-job"""
        return {}

    def posts(self):
        """Return all relevant post-objects"""
        return self._pagination(self.url)

    def _pagination(self, url):
        while True:
            time.sleep(random.uniform(self.wait_min, self.wait_max))

            response = self.request(url)
            if response.history:
                # sometimes there is a redirect from
                # the last page of a listing (.../tag/<tag>/1)
                # to the first page (.../tag/<tag>)
                # which could cause an endless loop
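                # (the suffixed URL contains 5 slashes,
                # the redirect target only 4)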
                cnt_old = response.history[0].url.count("/")
                cnt_new = response.url.count("/")
                if cnt_old == 5 and cnt_new == 4:
                    return
            page = response.text
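
            # each post is wrapped in
            # <div class="uhead"> ... <div class="ufoot">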
            yield from text.extract_iter(
                page, '<div class="uhead">', '<div class="ufoot">')

            try:
                pos = page.index("class='next'")
                pos = page.rindex("class='current'", 0, pos)
                url = self.root + text.extract(page, "href='", "'", pos)[0]
            except (ValueError, TypeError):
                return

    def _parse_post(self, post):
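        # a post's metadata is embedded as a JSON-LD <script> block;
        # the HTML before it holds the actual image markup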
        post, _, script = post.partition('<script type="application/ld+json">')
        images = text.extract_iter(post, '<div class="image">', '</div>')
        script = script[:script.index("</")].strip()

        try:
            data = json.loads(script)
        except ValueError:
            try:
                # remove control characters and escape backslashes
                mapping = dict.fromkeys(range(32))
                script = script.translate(mapping).replace("\\", "\\\\")
                data = json.loads(script)
            except ValueError as exc:
                self.log.warning("Unable to parse JSON data: %s", exc)
                return

        num = 0
        date = text.parse_datetime(data["datePublished"])
        user = data["author"]["name"]
        description = text.unescape(data["description"])
        title, _, tags = text.unescape(data["headline"]).partition(" / ")
        post_id = text.parse_int(
            data["mainEntityOfPage"]["@id"].rpartition("/")[2])
        if not tags:
            title, tags = tags, title
        tags = tags.split(" :: ")
        tags.sort()

        for image in images:
            url = text.extract(image, ' src="', '"')[0]
            if not url:
                continue
            if url.startswith("//"):
                url = "http:" + url
            width = text.extract(image, ' width="', '"')[0]
            height = text.extract(image, ' height="', '"')[0]
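            # the numeric image id is the last "-"-separated
            # part of the filename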
            image_id = url.rpartition("-")[2].partition(".")[0]
            num += 1

            if image.startswith("<iframe "):  # embed
                url = "ytdl:" + text.unescape(url)
            elif "/post/webm/" not in url and "/post/mp4/" not in url:
                url = url.replace("/post/", "/post/full/")

            yield {
                "url": url,
                "post_id": post_id,
                "image_id": text.parse_int(image_id),
                "width": text.parse_int(width),
                "height": text.parse_int(height),
                "title": title,
                "description": description,
                "tags": tags,
                "date": date,
                "user": user,
                "num": num,
            }


class ReactorTagExtractor(ReactorExtractor):
    """Extractor for tag searches on *reactor.cc sites"""
    subcategory = "tag"
    directory_fmt = ("{category}", "{search_tags}")
    archive_fmt = "{search_tags}_{post_id}_{num}"
    pattern = BASE_PATTERN + r"/tag/([^/?#]+)"
    test = ("http://anime.reactor.cc/tag/Anime+Art",)

    def __init__(self, match):
        ReactorExtractor.__init__(self, match)
        self.tag = match.group(2)

    def metadata(self):
        return {"search_tags": text.unescape(self.tag).replace("+", " ")}


class ReactorSearchExtractor(ReactorTagExtractor):
    """Extractor for search results on *reactor.cc sites"""
    subcategory = "search"
    directory_fmt = ("{category}", "search", "{search_tags}")
    archive_fmt = "s_{search_tags}_{post_id}_{num}"
    pattern = BASE_PATTERN + r"/search(?:/|\?q=)([^/?#]+)"
    test = ("http://anime.reactor.cc/search?q=Art",)


class ReactorUserExtractor(ReactorExtractor):
    """Extractor for all posts of a user on *reactor.cc sites"""
    subcategory = "user"
    directory_fmt = ("{category}", "user", "{user}")
    pattern = BASE_PATTERN + r"/user/([^/?#]+)"
    test = ("http://anime.reactor.cc/user/Shuster",)

    def __init__(self, match):
        ReactorExtractor.__init__(self, match)
        self.user = match.group(2)

    def metadata(self):
        return {"user": text.unescape(self.user).replace("+", " ")}


class ReactorPostExtractor(ReactorExtractor):
    """Extractor for single posts on *reactor.cc sites"""
    subcategory = "post"
    pattern = BASE_PATTERN + r"/post/(\d+)"
    test = ("http://anime.reactor.cc/post/3576250",)

    def __init__(self, match):
        ReactorExtractor.__init__(self, match)
        self.post_id = match.group(2)

    def items(self):
        yield Message.Version, 1
        post = self.request(self.url).text
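        # skip everything before the first post container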
        pos = post.find('class="uhead">')
        for image in self._parse_post(post[pos:]):
            if image["num"] == 1:
                yield Message.Directory, image
            url = image["url"]
            yield Message.Url, url, text.nameext_from_url(url, image)


# --------------------------------------------------------------------
# JoyReactor

JR_BASE_PATTERN = r"(?:https?://)?(?:www\.)?(joyreactor\.c(?:c|om))"


class JoyreactorTagExtractor(ReactorTagExtractor):
    """Extractor for tag searches on joyreactor.cc"""
    category = "joyreactor"
    pattern = JR_BASE_PATTERN + r"/tag/([^/?#]+)"
    test = (
        ("http://joyreactor.cc/tag/Advent+Cirno", {
            "count": ">= 17",
        }),
        ("http://joyreactor.com/tag/Cirno", {
            "url": "de1e60c15bfb07a0e9603b00dc3d05f60edc7914",
        }),
    )


class JoyreactorSearchExtractor(ReactorSearchExtractor):
    """Extractor for search results on joyreactor.cc"""
    category = "joyreactor"
    pattern = JR_BASE_PATTERN + r"/search(?:/|\?q=)([^/?#]+)"
    test = (
        ("http://joyreactor.cc/search/Cirno", {
            "range": "1-25",
            "count": ">= 20",
        }),
        ("http://joyreactor.com/search?q=Cirno", {
            "range": "1-25",
            "count": ">= 20",
        }),
    )


class JoyreactorUserExtractor(ReactorUserExtractor):
    """Extractor for all posts of a user on joyreactor.cc"""
    category = "joyreactor"
    pattern = JR_BASE_PATTERN + r"/user/([^/?#]+)"
    test = (
        ("http://joyreactor.cc/user/hemantic"),
        ("http://joyreactor.com/user/Tacoman123", {
            "url": "452cd0fa23e2ad0e122c296ba75aa7f0b29329f6",
        }),
    )


class JoyreactorPostExtractor(ReactorPostExtractor):
    """Extractor for single posts on joyreactor.cc"""
    category = "joyreactor"
    pattern = JR_BASE_PATTERN + r"/post/(\d+)"
    test = (
        ("http://joyreactor.com/post/3721876", {  # single image
            "url": "6ce09f239d8b7fdf6dd1664c2afc39618cc87663",
            "keyword": "147ed5b9799ba43cbd16168450afcfae5ddedbf3",
        }),
        ("http://joyreactor.com/post/3713804", {  # 4 images
            "url": "f08ac8493ca0619a3e3c6bedb8d8374af3eec304",
            "keyword": "f12c6f3c2f298fed9b12bd3e70fb823870aa9b93",
        }),
        ("http://joyreactor.com/post/3726210", {  # gif / video
            "url": "33a48e1eca6cb2d298fbbb6536b3283799d6515b",
            "keyword": "d173cc6e88f02a63904e475eacd7050304eb1967",
        }),
        ("http://joyreactor.com/post/3668724", {  # youtube embed
            "url": "bf1666eddcff10c9b58f6be63fa94e4e13074214",
            "keyword": "e18b1ffbd79d76f9a0e90b6d474cc2499e343f0b",
        }),
        ("http://joyreactor.cc/post/1299", {  # "malformed" JSON
            "url": "ac900743ed7cf1baf3db3b531c3bc414bf1ffcde",
        }),
    )


# --------------------------------------------------------------------
# PornReactor

PR_BASE_PATTERN = r"(?:https?://)?(?:www\.)?(pornreactor\.cc|fapreactor\.com)"


class PornreactorTagExtractor(ReactorTagExtractor):
    """Extractor for tag searches on pornreactor.cc"""
    category = "pornreactor"
    pattern = PR_BASE_PATTERN + r"/tag/([^/?#]+)"
    test = (
        ("http://pornreactor.cc/tag/RiceGnat", {
            "range": "1-25",
            "count": ">= 25",
        }),
        ("http://fapreactor.com/tag/RiceGnat"),
    )


class PornreactorSearchExtractor(ReactorSearchExtractor):
    """Extractor for search results on pornreactor.cc"""
    category = "pornreactor"
    pattern = PR_BASE_PATTERN + r"/search(?:/|\?q=)([^/?#]+)"
    test = (
        ("http://pornreactor.cc/search?q=ecchi+hentai", {
            "range": "1-25",
            "count": ">= 25",
        }),
        ("http://fapreactor.com/search/ecchi+hentai"),
    )


class PornreactorUserExtractor(ReactorUserExtractor):
    """Extractor for all posts of a user on pornreactor.cc"""
    category = "pornreactor"
    pattern = PR_BASE_PATTERN + r"/user/([^/?#]+)"
    test = (
        ("http://pornreactor.cc/user/Disillusion", {
            "range": "1-25",
            "count": ">= 25",
        }),
        ("http://fapreactor.com/user/Disillusion"),
    )


class PornreactorPostExtractor(ReactorPostExtractor):
    """Extractor for single posts on pornreactor.cc"""
    category = "pornreactor"
    subcategory = "post"
    pattern = PR_BASE_PATTERN + r"/post/(\d+)"
    test = (
        ("http://pornreactor.cc/post/863166", {
            "url": "680db1e33ca92ff70b2c0e1708c471cbe2201324",
            "content": "ec6b0568bfb1803648744077da082d14de844340",
        }),
        ("http://fapreactor.com/post/863166", {
            "url": "864ecd5785e4898301aa8d054dd653b1165be158",
        }),
    )