# -*- coding: utf-8 -*-

# Copyright 2018-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.newgrounds.com/"""

from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache
import itertools
import re

BASE_PATTERN = r"(?:https?://)?(?:www\.)?newgrounds\.com"
USER_PATTERN = r"(?:https?://)?([\w-]+)\.newgrounds\.com"


class NewgroundsExtractor(Extractor):
    """Base class for newgrounds extractors"""
    category = "newgrounds"
    directory_fmt = ("{category}", "{artist[:10]:J, }")
    filename_fmt = "{category}_{_index}_{title}.{extension}"
    archive_fmt = "{_type}{_index}"
    root = "https://www.newgrounds.com"
    cookies_domain = ".newgrounds.com"
    cookies_names = ("NG_GG_username", "vmk1du5I8m")
    request_interval = (0.5, 1.5)

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.user = match.group(1)
        self.user_root = "https://{}.newgrounds.com".format(self.user)

    def _init(self):
        self._extract_comment_urls = re.compile(
            r'(?:<img |data-smartload-)src="([^"]+)').findall
        self.flash = self.config("flash", True)

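        # 'format' config value: unset or "original" prefers source
        # formats over resolution-capped ones, a list/tuple is used
        # as given, and a single value such as 720 or "720p" switches
        # to _video_formats_limit to cap the maximum resolution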
        fmt = self.config("format")
        if not fmt or fmt == "original":
            self.format = ("mp4", "webm", "m4v", "mov", "mkv",
                           1080, 720, 360)
        elif isinstance(fmt, (list, tuple)):
            self.format = fmt
        else:
            self._video_formats = self._video_formats_limit
            self.format = (fmt if isinstance(fmt, int) else
                           text.parse_int(fmt.rstrip("p")))

    def items(self):
        self.login()
        metadata = self.metadata()

        for post_url in self.posts():
            try:
                post = self.extract_post(post_url)
                url = post.get("url")
            except Exception as exc:
                self.log.debug("", exc_info=exc)
                url = None

            if url:
                if metadata:
                    post.update(metadata)
                yield Message.Directory, post
                post["num"] = 0
                yield Message.Url, url, text.nameext_from_url(url, post)

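                # additional images of a multi-image post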
                if "_multi" in post:
                    for data in post["_multi"]:
                        post["num"] += 1
                        post["_index"] = "{}_{:>02}".format(
                            post["index"], post["num"])
                        post.update(data)
                        url = data["image"]

                        text.nameext_from_url(url, post)
                        yield Message.Url, url, post

                        if "_fallback" in post:
                            del post["_fallback"]

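                # images embedded in the author comments section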
                for url in self._extract_comment_urls(post["_comment"]):
                    post["num"] += 1
                    post["_index"] = "{}_{:>02}".format(
                        post["index"], post["num"])
                    url = text.ensure_http_scheme(url)
                    text.nameext_from_url(url, post)
                    yield Message.Url, url, post
            else:
                self.log.warning(
                    "Unable to get download URL for '%s'", post_url)

    def posts(self):
        """Return URLs of all relevant post pages"""
        return self._pagination(self._path, self.groups[1])

    def metadata(self):
        """Return general metadata"""

    def login(self):
        if self.cookies_check(self.cookies_names):
            return

        username, password = self._get_auth_info()
        if username:
            self.cookies_update(self._login_impl(username, password))

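    # cache login cookies for up to one year, keyed by username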
    @cache(maxage=365*86400, keyarg=1)
    def _login_impl(self, username, password):
        self.log.info("Logging in as %s", username)

        url = self.root + "/passport"
        response = self.request(url)
        if response.history and response.url.endswith("/social"):
            return self.cookies

        page = response.text
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            "X-Requested-With": "XMLHttpRequest",
            "Origin": self.root,
            "Referer": url,
        }
        url = text.urljoin(self.root, text.extr(page, 'action="', '"'))
        data = {
            "auth"    : text.extr(page, 'name="auth" value="', '"'),
            "remember": "1",
            "username": username,
            "password": str(password),
            "code"    : "",
            "codehint": "------",
            "mfaCheck": "1",
        }

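        # resubmit the login form until it succeeds; a second pass
        # happens when the account requires a 2FA or email code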
        while True:
            response = self.request(
                url, method="POST", headers=headers, data=data)
            result = response.json()

            if result.get("success"):
                break
            if "errors" in result:
                raise exception.AuthenticationError(
                    '"' + '", "'.join(result["errors"]) + '"')

            if result.get("requiresMfa"):
                data["code"] = self.input("Verification Code: ")
                data["codehint"] = " "
            elif result.get("requiresEmailMfa"):
                email = result.get("obfuscatedEmail")
                prompt = "Email Verification Code ({}): ".format(email)
                data["code"] = self.input(prompt)
                data["codehint"] = " "

            data.pop("mfaCheck", None)

        return {
            cookie.name: cookie.value
            for cookie in response.cookies
        }

    def extract_post(self, post_url):
        url = post_url
        if "/art/view/" in post_url:
            extract_data = self._extract_image_data
        elif "/audio/listen/" in post_url:
            extract_data = self._extract_audio_data
        else:
            extract_data = self._extract_media_data
            if self.flash:
                url += "/format/flash"

        response = self.request(url, fatal=False)
        page = response.text

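        # age-restricted posts are hidden unless logged in;
        # emit the site's notice and skip the post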
        pos = page.find('id="adults_only"')
        if pos >= 0:
            msg = text.extract(page, 'class="highlight">', '<', pos)[0]
            self.log.warning('"%s"', msg)
            return {}

        if response.status_code >= 400:
            return {}

        extr = text.extract_from(page)
        data = extract_data(extr, post_url)

        data["_comment"] = extr(
            'id="author_comments"', '</div>').partition(">")[2]
        data["comment"] = text.unescape(text.remove_html(
            data["_comment"], "", ""))
        data["favorites"] = text.parse_int(extr(
            'id="faves_load">', '<').replace(",", ""))
        data["score"] = text.parse_float(extr('id="score_number">', '<'))
        data["tags"] = text.split_html(extr('<dd class="tags">', '</dd>'))
        data["artist"] = [
            text.extr(user, '//', '.')
            for user in text.extract_iter(page, '<div class="item-user">', '>')
        ]

        data["tags"].sort()
        data["user"] = self.user or data["artist"][0]
        data["post_url"] = post_url
        return data

    def _extract_image_data(self, extr, url):
        full = text.extract_from(util.json_loads(extr(
            '"full_image_text":', '});')))
        data = {
            "title"      : text.unescape(extr('"og:title" content="', '"')),
            "description": text.unescape(extr(':description" content="', '"')),
            "type"       : extr('og:type" content="', '"'),
            "_type"      : "i",
            "date"       : text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"')),
            "rating"     : extr('class="rated-', '"'),
            "url"        : full('src="', '"'),
            "width"      : text.parse_int(full('width="', '"')),
            "height"     : text.parse_int(full('height="', '"')),
        }
        index = data["url"].rpartition("/")[2].partition("_")[0]
        data["index"] = text.parse_int(index)
        data["_index"] = index

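        # check for additional images, either as a JSON 'imageData'
        # array or as an 'art-images' HTML gallery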
        image_data = extr("let imageData =", "\n];")
        if image_data:
            data["_multi"] = self._extract_images_multi(image_data)
        else:
            art_images = extr('<div class="art-images', '\n</div>')
            if art_images:
                data["_multi"] = self._extract_images_art(art_images, data)

        return data

    def _extract_images_multi(self, html):
        data = util.json_loads(html + "]")
        yield from data[1:]

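    # gallery thumbnails are webp; rewrite them to the main image's
    # extension and keep the alternatives as fallback URLs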
    def _extract_images_art(self, html, data):
        ext = text.ext_from_url(data["url"])
        for url in text.extract_iter(html, 'data-smartload-src="', '"'):
            url = text.ensure_http_scheme(url)
            url = url.replace("/medium_views/", "/images/", 1)
            if text.ext_from_url(url) == "webp":
                fallback = [url.replace(".webp", "." + e)
                            for e in ("jpg", "png", "gif") if e != ext]
                fallback.append(url)
                yield {
                    "image"    : url.replace(".webp", "." + ext),
                    "_fallback": fallback,
                }
            else:
                yield {"image": url}

    @staticmethod
    def _extract_audio_data(extr, url):
        index = url.split("/")[5]
        return {
            "title"      : text.unescape(extr('"og:title" content="', '"')),
            "description": text.unescape(extr(':description" content="', '"')),
            "type"       : extr('og:type" content="', '"'),
            "_type"      : "a",
            "date"       : text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"')),
            "url"        : extr('{"url":"', '"').replace("\\/", "/"),
            "index"      : text.parse_int(index),
            "_index"     : index,
            "rating"     : "",
        }

    def _extract_media_data(self, extr, url):
        index = url.split("/")[5]
        title = extr('"og:title" content="', '"')
        type = extr('og:type" content="', '"')
        descr = extr('"og:description" content="', '"')
        src = extr('{"url":"', '"')

        if src:
            src = src.replace("\\/", "/")
            formats = ()
            date = text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"'))
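        # no direct file URL in the page: fetch the available video
        # sources, use the best format, and keep the rest as fallback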
        else:
            url = self.root + "/portal/video/" + index
            headers = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "X-Requested-With": "XMLHttpRequest",
            }
            sources = self.request(url, headers=headers).json()["sources"]
            formats = self._video_formats(sources)
            src = next(formats, "")
            date = text.parse_timestamp(src.rpartition("?")[2])

        return {
            "title"      : text.unescape(title),
            "url"        : src,
            "date"       : date,
            "type"       : type,
            "_type"      : "",
            "description": text.unescape(descr or extr(
                'itemprop="description" content="', '"')),
            "rating"     : extr('class="rated-', '"'),
            "index"      : text.parse_int(index),
            "_index"     : index,
            "_fallback"  : formats,
        }

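    # yield a source URL for each entry in self.format; string formats
    # missing from 'sources' are guessed by rewriting the 360p URL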
    def _video_formats(self, sources):
        src = sources["360p"][0]["src"]
        sub = re.compile(r"\.360p\.\w+").sub

        for fmt in self.format:
            try:
                if isinstance(fmt, int):
                    yield sources[str(fmt) + "p"][0]["src"]
                elif fmt in sources:
                    yield sources[fmt][0]["src"]
                else:
                    yield sub("." + fmt, src, 1)
            except Exception as exc:
                self.log.debug("Video format '%s' not available (%s: %s)",
                               fmt, exc.__class__.__name__, exc)

    def _video_formats_limit(self, sources):
        formats = []
        for fmt, src in sources.items():
            width = text.parse_int(fmt.rstrip("p"))
            if width <= self.format:
                formats.append((width, src))

        formats.sort(reverse=True)
        for fmt in formats:
            yield fmt[1][0]["src"]

    def _pagination(self, kind, pnum=1):
        url = "{}/{}".format(self.user_root, kind)
        params = {
            "page": text.parse_int(pnum, 1),
            "isAjaxRequest": "1",
        }
        headers = {
            "Referer": url,
            "X-Requested-With": "XMLHttpRequest",
        }

        while True:
            with self.request(
                    url, params=params, headers=headers,
                    fatal=False) as response:
                try:
                    data = response.json()
                except ValueError:
                    return
                if not data:
                    return
                if "errors" in data:
                    msg = ", ".join(text.unescape(e) for e in data["errors"])
                    raise exception.StopExtraction(msg)

            items = data.get("items")
            if not items:
                return

            for year, items in items.items():
                for item in items:
                    page_url = text.extr(item, 'href="', '"')
                    if page_url[0] == "/":
                        page_url = self.root + page_url
                    yield page_url

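            # an empty or minimal 'load_more' snippet means this
            # was the last page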
            more = data.get("load_more")
            if not more or len(more) < 8:
                return
            params["page"] += 1


class NewgroundsImageExtractor(NewgroundsExtractor):
    """Extractor for a single image from newgrounds.com"""
    subcategory = "image"
    pattern = (r"(?:https?://)?(?:"
               r"(?:www\.)?newgrounds\.com/art/view/([^/?#]+)/[^/?#]+"
               r"|art\.ngfiles\.com/images/\d+/\d+_([^_]+)_([^.]+))")
    example = "https://www.newgrounds.com/art/view/USER/TITLE"

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        if match.group(2):
            self.user = match.group(2)
            self.post_url = "https://www.newgrounds.com/art/view/{}/{}".format(
                self.user, match.group(3))
        else:
            self.post_url = text.ensure_http_scheme(match.group(0))

    def posts(self):
        return (self.post_url,)


class NewgroundsMediaExtractor(NewgroundsExtractor):
    """Extractor for a media file from newgrounds.com"""
    subcategory = "media"
    pattern = BASE_PATTERN + r"(/(?:portal/view|audio/listen)/\d+)"
    example = "https://www.newgrounds.com/portal/view/12345"

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        self.user = ""
        self.post_url = self.root + match.group(1)

    def posts(self):
        return (self.post_url,)


class NewgroundsArtExtractor(NewgroundsExtractor):
    """Extractor for all images of a newgrounds user"""
    subcategory = _path = "art"
    pattern = USER_PATTERN + r"/art(?:(?:/page/|/?\?page=)(\d+))?/?$"
    example = "https://USER.newgrounds.com/art"


class NewgroundsAudioExtractor(NewgroundsExtractor):
    """Extractor for all audio submissions of a newgrounds user"""
    subcategory = _path = "audio"
    pattern = USER_PATTERN + r"/audio(?:(?:/page/|/?\?page=)(\d+))?/?$"
    example = "https://USER.newgrounds.com/audio"


class NewgroundsMoviesExtractor(NewgroundsExtractor):
    """Extractor for all movies of a newgrounds user"""
    subcategory = _path = "movies"
    pattern = USER_PATTERN + r"/movies(?:(?:/page/|/?\?page=)(\d+))?/?$"
    example = "https://USER.newgrounds.com/movies"


class NewgroundsGamesExtractor(NewgroundsExtractor):
    """Extractor for a newgrounds user's games"""
    subcategory = _path = "games"
    pattern = USER_PATTERN + r"/games(?:(?:/page/|/?\?page=)(\d+))?/?$"
    example = "https://USER.newgrounds.com/games"


class NewgroundsUserExtractor(NewgroundsExtractor):
    """Extractor for a newgrounds user profile"""
    subcategory = "user"
    pattern = USER_PATTERN + r"/?$"
    example = "https://USER.newgrounds.com"

    def initialize(self):
        pass

    def items(self):
        base = self.user_root + "/"
        return self._dispatch_extractors((
            (NewgroundsArtExtractor   , base + "art"),
            (NewgroundsAudioExtractor , base + "audio"),
            (NewgroundsGamesExtractor , base + "games"),
            (NewgroundsMoviesExtractor, base + "movies"),
        ), ("art",))


class NewgroundsFavoriteExtractor(NewgroundsExtractor):
    """Extractor for posts favorited by a newgrounds user"""
    subcategory = "favorite"
    directory_fmt = ("{category}", "{user}", "Favorites")
    pattern = (USER_PATTERN + r"/favorites(?!/following)(?:/(art|audio|movies)"
               r"(?:(?:/page/|/?\?page=)(\d+))?)?")
    example = "https://USER.newgrounds.com/favorites"

    def posts(self):
        _, kind, pnum = self.groups
        if kind:
            return self._pagination_favorites(kind, pnum)
        return itertools.chain.from_iterable(
            self._pagination_favorites(k) for k in ("art", "audio", "movies")
        )

    def _pagination_favorites(self, kind, pnum=1):
        url = "{}/favorites/{}".format(self.user_root, kind)
        params = {
            "page": text.parse_int(pnum, 1),
            "isAjaxRequest": "1",
        }
        headers = {
            "Referer": url,
            "X-Requested-With": "XMLHttpRequest",
        }

        while True:
            response = self.request(url, params=params, headers=headers)
            if response.history:
                return

            data = response.json()
            favs = self._extract_favorites(data.get("component") or "")
            yield from favs

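            # a full page appears to hold 24 favorites;
            # fewer means this was the last page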
            if len(favs) < 24:
                return
            params["page"] += 1

    def _extract_favorites(self, page):
        return [
            self.root + path
            for path in text.extract_iter(
                page, 'href="https://www.newgrounds.com', '"')
        ]


class NewgroundsFollowingExtractor(NewgroundsFavoriteExtractor):
    """Extractor for a newgrounds user's favorited users"""
    subcategory = "following"
    pattern = USER_PATTERN + r"/favorites/(following)"
    example = "https://USER.newgrounds.com/favorites/following"

    def items(self):
        _, kind, pnum = self.groups
        data = {"_extractor": NewgroundsUserExtractor}
        for url in self._pagination_favorites(kind, pnum):
            yield Message.Queue, url, data

    @staticmethod
    def _extract_favorites(page):
        return [
            text.ensure_http_scheme(user.rpartition('"')[2])
            for user in text.extract_iter(page, 'class="item-user', '"><img')
        ]


class NewgroundsSearchExtractor(NewgroundsExtractor):
    """Extractor for newgrounds.com search results"""
    subcategory = "search"
    directory_fmt = ("{category}", "search", "{search_tags}")
    pattern = BASE_PATTERN + r"/search/conduct/([^/?#]+)/?\?([^#]+)"
    example = "https://www.newgrounds.com/search/conduct/art?terms=QUERY"

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        self._path, query = self.groups
        self.query = text.parse_query(query)

    def posts(self):
        suitabilities = self.query.get("suitabilities")
        if suitabilities:
            data = {"view_suitability_" + s: "on"
                    for s in suitabilities.split(",")}
            self.request(self.root + "/suitabilities",
                         method="POST", data=data)
        return self._pagination_search(
            "/search/conduct/" + self._path, self.query)

    def metadata(self):
        return {"search_tags": self.query.get("terms", "")}

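    # request search result fragments until a page
    # contains no more post links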
    def _pagination_search(self, path, params):
        url = self.root + path
        params["inner"] = "1"
        params["page"] = text.parse_int(params.get("page"), 1)
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "X-Requested-With": "XMLHttpRequest",
        }

        while True:
            data = self.request(url, params=params, headers=headers).json()

            post_url = None
            for post_url in text.extract_iter(data["content"], 'href="', '"'):
                if not post_url.startswith("/search/"):
                    yield post_url

            if post_url is None:
                return
            params["page"] += 1