# -*- coding: utf-8 -*-

# Copyright 2018-2020 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.newgrounds.com/"""

from .common import Extractor, Message
from .. import text, exception
from ..cache import cache
import itertools
import json


class NewgroundsExtractor(Extractor):
    """Base class for newgrounds extractors"""
    category = "newgrounds"
    directory_fmt = ("{category}", "{artist[:10]:J, }")
    filename_fmt = "{category}_{_index}_{title}.{extension}"
    archive_fmt = "{_index}"
    root = "https://www.newgrounds.com"
    cookiedomain = ".newgrounds.com"
    # cookie names used to detect an existing login session
    cookienames = ("NG_GG_username", "vmk1du5I8m")

    def __init__(self, match):
        # group(1) of every subclass pattern captures the username
        Extractor.__init__(self, match)
        self.user = match.group(1)
        self.user_root = "https://{}.newgrounds.com".format(self.user)

    def items(self):
        """Yield Directory/Url messages for every post from posts()"""
        self.login()
        yield Message.Version, 1

        for post_url in self.posts():
            try:
                post = self.extract_post(post_url)
                url = post.get("url")
            except Exception:
                # best-effort: skip broken posts, keep full traceback
                # available at debug level
                self.log.debug("", exc_info=True)
                url = None

            if url:
                yield Message.Directory, post
                yield Message.Url, url, text.nameext_from_url(url, post)

                # additional images embedded in the author comments section
                for num, url in enumerate(text.extract_iter(
                        post["_comment"], 'data-smartload-src="', '"'), 1):
                    post["num"] = num
                    post["_index"] = "{}_{:>02}".format(post["index"], num)
                    url = text.ensure_http_scheme(url)
                    yield Message.Url, url, text.nameext_from_url(url, post)
            else:
                self.log.warning(
                    "Unable to get download URL for '%s'", post_url)

    def posts(self):
        """Return urls of all relevant image pages"""
        return self._pagination(self.subcategory)

    def login(self):
        """Log in if credentials are configured"""
        username, password = self._get_auth_info()
        if username:
            self._update_cookies(self._login_impl(username, password))

    # cached per username for 360 days
    @cache(maxage=360*24*3600, keyarg=1)
    def _login_impl(self, username, password):
        """Perform the actual login and return the session cookies"""
        self.log.info("Logging in as %s", username)

        url = self.root + "/passport/"
        page = self.request(url).text
        headers = {"Origin": self.root, "Referer": url}

        # the login form's action attribute holds the POST target
        url = text.urljoin(self.root, text.extract(page, 'action="', '"')[0])
        data = {
            "username": username,
            "password": password,
            "remember": "1",
            "login" : "1",
        }

        response = self.request(url, method="POST", headers=headers, data=data)
        # a successful login redirects; no redirect means bad credentials
        if not response.history:
            raise exception.AuthenticationError()

        # collect persistent session cookies set by the first response
        return {
            cookie.name: cookie.value
            for cookie in response.history[0].cookies
            if cookie.expires and cookie.domain == self.cookiedomain
        }

    def extract_post(self, post_url):
        """Fetch a post page and return its metadata dict"""
        response = self.request(post_url, fatal=False)
        if response.status_code >= 400:
            # unavailable post; caller treats empty dict as "no URL"
            return {}
        page = response.text
        extr = text.extract_from(page)

        # dispatch on URL type: art image, audio, or other media
        if "/art/view/" in post_url:
            data = self._extract_image_data(extr, post_url)
        elif "/audio/listen/" in post_url:
            data = self._extract_audio_data(extr, post_url)
        else:
            data = self._extract_media_data(extr, post_url)

        # raw author-comments HTML is kept in "_comment" so items() can
        # find embedded images in it later
        data["_comment"] = extr('id="author_comments"', '</div>')
        data["comment"] = text.unescape(text.remove_html(
            data["_comment"].partition(">")[2], "", ""))
        data["favorites"] = text.parse_int(extr(
            'id="faves_load">', '<').replace(",", ""))
        data["score"] = text.parse_float(extr('id="score_number">', '<'))
        data["tags"] = text.split_html(extr('<dd class="tags">', '</dd>'))
        # artist names are taken from the subdomain part of user links
        data["artist"] = [
            text.extract(user, '//', '.')[0]
            for user in text.extract_iter(page, '<div class="item-user">', '>')
        ]

        data["tags"].sort()
        data["user"] = self.user or data["artist"][0]
        return data

    @staticmethod
    def _extract_image_data(extr, url):
        """Parse metadata of an art post"""
        # "full_image_text" is an embedded JSON string with the full-size
        # <img> markup
        full = text.extract_from(json.loads(extr('"full_image_text":', '});')))
        data = {
            "title" : text.unescape(extr('"og:title" content="', '"')),
            "description": text.unescape(extr(':description" content="', '"')),
            "date" : text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"')),
            "rating" : extr('class="rated-', '"'),
            "url" : full('src="', '"'),
            "width" : text.parse_int(full('width="', '"')),
            "height" : text.parse_int(full('height="', '"')),
        }
        # filename starts with "<index>_"
        index = data["url"].rpartition("/")[2].partition("_")[0]
        data["index"] = text.parse_int(index)
        data["_index"] = index
        return data

    @staticmethod
    def _extract_audio_data(extr, url):
        """Parse metadata of an audio post"""
        # post index is the numeric path component of ".../audio/listen/<id>"
        index = url.split("/")[5]
        return {
            "title" : text.unescape(extr('"og:title" content="', '"')),
            "description": text.unescape(extr(':description" content="', '"')),
            "date" : text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"')),
            "url" : extr('{"url":"', '"').replace("\\/", "/"),
            "index" : text.parse_int(index),
            "_index" : index,
            "rating" : "",
        }

    def _extract_media_data(self, extr, url):
        """Parse metadata of a movie/portal post"""
        index = url.split("/")[5]
        title = extr('"og:title" content="', '"')
        src = extr('{"url":"', '"')

        if src:
            # direct media URL embedded in the page
            src = src.replace("\\/", "/")
            fallback = ()
            date = text.parse_datetime(extr(
                'itemprop="datePublished" content="', '"'))
        else:
            # no embedded URL: query the JSON video endpoint instead
            url = self.root + "/portal/video/" + index
            headers = {
                "Accept": "application/json, text/javascript, */*; q=0.01",
                "X-Requested-With": "XMLHttpRequest",
                "Referer": self.root,
            }
            sources = self.request(url, headers=headers).json()["sources"]
            # prefer the un-suffixed (original quality) variant of the
            # "360p" source URL
            src = sources["360p"][0]["src"].replace(".360p.", ".")
            fallback = self._video_fallback(sources)
            # the URL's query string is a Unix timestamp
            date = text.parse_timestamp(src.rpartition("?")[2])

        return {
            "title" : text.unescape(title),
            "url" : src,
            "date" : date,
            "description": text.unescape(extr(
                'itemprop="description" content="', '"')),
            "rating" : extr('class="rated-', '"'),
            "index" : text.parse_int(index),
            "_index" : index,
            "_fallback" : fallback,
        }

    @staticmethod
    def _video_fallback(sources):
        """Yield alternate video URLs, highest resolution first"""
        sources = list(sources.items())
        # keys look like "<number>p"; sort by the numeric part, descending
        sources.sort(key=lambda src: text.parse_int(src[0][:-1]), reverse=True)
        for src in sources:
            yield src[1][0]["src"]

    def _pagination(self, kind):
        """Yield post page URLs from the JSON listing of 'kind'"""
        root = self.user_root
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "X-Requested-With": "XMLHttpRequest",
            "Referer": root,
        }
        url = "{}/{}/page/1".format(root, kind)

        while True:
            with self.request(url, headers=headers, fatal=False) as response:
                try:
                    data = response.json()
                except ValueError:
                    # non-JSON response: nothing to paginate
                    return
                if not data:
                    return
                if "errors" in data:
                    msg = ", ".join(text.unescape(e) for e in data["errors"])
                    raise exception.StopExtraction(msg)

            # items are grouped by year in the JSON payload
            for year in data["sequence"]:
                for item in data["years"][str(year)]["items"]:
                    page_url = text.extract(item, 'href="', '"')[0]
                    yield text.urljoin(root, page_url)

            # "more" holds the relative URL of the next page, if any
            if not data["more"]:
                return
            url = text.urljoin(root, data["more"])


class NewgroundsImageExtractor(NewgroundsExtractor):
    """Extractor for a single image from newgrounds.com"""
    subcategory = "image"
    # matches post pages (group 1 = user) as well as direct
    # art.ngfiles.com file URLs (groups 2 and 3 = user, title)
    pattern = (r"(?:https?://)?(?:"
               r"(?:www\.)?newgrounds\.com/art/view/([^/?#]+)/[^/?#]+"
               r"|art\.ngfiles\.com/images/\d+/\d+_([^_]+)_([^.]+))")
    test = (
        ("https://www.newgrounds.com/art/view/tomfulp/ryu-is-hawt", {
            "url": "57f182bcbbf2612690c3a54f16ffa1da5105245e",
            "content": "8f395e08333eb2457ba8d8b715238f8910221365",
            "keyword": {
                "artist" : ["tomfulp"],
                "comment" : "re:Consider this the bottom threshold for ",
                "date" : "dt:2009-06-04 14:44:05",
                "description": "re:Consider this the bottom threshold for ",
                "favorites" : int,
                "filename" : "94_tomfulp_ryu-is-hawt",
                "height" : 476,
                "index" : 94,
                "rating" : "e",
                "score" : float,
                "tags" : ["ryu", "streetfighter"],
                "title" : "Ryu is Hawt",
                "user" : "tomfulp",
                "width" : 447,
            },
        }),
        ("https://art.ngfiles.com/images/0/94_tomfulp_ryu-is-hawt.gif", {
            "url": "57f182bcbbf2612690c3a54f16ffa1da5105245e",
        }),
        ("https://www.newgrounds.com/art/view/sailoryon/yon-dream-buster", {
            "url": "84eec95e663041a80630df72719f231e157e5f5d",
            "count": 2,
        })
    )

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        if match.group(2):
            # direct file URL: rebuild the corresponding post page URL
            self.user = match.group(2)
            self.post_url = "https://www.newgrounds.com/art/view/{}/{}".format(
                self.user, match.group(3))
        else:
            self.post_url = text.ensure_http_scheme(match.group(0))

    def posts(self):
        # a single post per matched URL
        return (self.post_url,)


class NewgroundsMediaExtractor(NewgroundsExtractor):
    """Extractor for a media file from newgrounds.com"""
    subcategory = "media"
    pattern = (r"(?:https?://)?(?:www\.)?newgrounds\.com"
               r"(/(?:portal/view|audio/listen)/\d+)")
    test = (
        ("https://www.newgrounds.com/portal/view/595355", {
            "pattern": r"https://uploads\.ungrounded\.net/alternate/564000"
                       r"/564957_alternate_31\.mp4\?1359712249",
            "keyword": {
                "artist" : ["kickinthehead", "danpaladin", "tomfulp"],
                "comment" : "re:My fan trailer for Alien Hominid HD!",
                "date" : "dt:2013-02-01 09:50:49",
                "favorites" : int,
                "filename" : "564957_alternate_31",
                "index" : 595355,
                "rating" : "e",
                "score" : float,
                "tags" : ["alienhominid", "trailer"],
                "title" : "Alien Hominid Fan Trailer",
                "user" : "kickinthehead",
            },
        }),
        ("https://www.newgrounds.com/audio/listen/609768", {
            "url": "f4c5490ae559a3b05e46821bb7ee834f93a43c95",
            "keyword": {
                "artist" : ["zj", "tomfulp"],
                "comment" : "re:RECORDED 12-09-2014\n\nFrom The ZJ \"Late ",
                "date" : "dt:2015-02-23 19:31:59",
                "description": "From The ZJ Report Show!",
                "favorites" : int,
                "index" : 609768,
                "rating" : "",
                "score" : float,
                "tags" : ["fulp", "interview", "tom", "zj"],
                "title" : "ZJ Interviews Tom Fulp!",
                "user" : "zj",
            },
        }),
    )

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        # the username is not part of these URLs; extract_post() falls
        # back to the first artist found on the page
        self.user = ""
        self.post_url = self.root + match.group(1)

    def posts(self):
        # a single post per matched URL
        return (self.post_url,)


class NewgroundsArtExtractor(NewgroundsExtractor):
    """Extractor for all images of a newgrounds user"""
    # posts() is inherited: paginates "<user>.newgrounds.com/art"
    subcategory = "art"
    pattern = r"(?:https?://)?([\w-]+)\.newgrounds\.com/art/?$"
    test = ("https://tomfulp.newgrounds.com/art", {
        "pattern": NewgroundsImageExtractor.pattern,
        "count": ">= 3",
    })


class NewgroundsAudioExtractor(NewgroundsExtractor):
    """Extractor for all audio submissions of a newgrounds user"""
    # posts() is inherited: paginates "<user>.newgrounds.com/audio"
    subcategory = "audio"
    pattern = r"(?:https?://)?([\w-]+)\.newgrounds\.com/audio/?$"
    test = ("https://tomfulp.newgrounds.com/audio", {
        "pattern": r"https://audio.ngfiles.com/\d+/\d+_.+\.mp3",
        "count": ">= 4",
    })


class NewgroundsMoviesExtractor(NewgroundsExtractor):
    """Extractor for all movies of a newgrounds user"""
    # posts() is inherited: paginates "<user>.newgrounds.com/movies"
    subcategory = "movies"
    pattern = r"(?:https?://)?([\w-]+)\.newgrounds\.com/movies/?$"
    test = ("https://tomfulp.newgrounds.com/movies", {
        "pattern": r"https://uploads.ungrounded.net(/alternate)?/\d+/\d+_.+",
        "range": "1-10",
        "count": 10,
    })


class NewgroundsUserExtractor(NewgroundsExtractor):
    """Extractor for a newgrounds user profile"""
    subcategory = "user"
    pattern = r"(?:https?://)?([\w-]+)\.newgrounds\.com/?$"
    test = (
        ("https://tomfulp.newgrounds.com", {
            "pattern": "https://tomfulp.newgrounds.com/art$",
        }),
        ("https://tomfulp.newgrounds.com", {
            "options": (("include", "all"),),
            "pattern": "https://tomfulp.newgrounds.com/(art|audio|movies)$",
            "count": 3,
        }),
    )

    def items(self):
        """Dispatch to the per-category extractors for this user

        Only the "art" category is included by default.
        """
        root = self.user_root
        extractors = (
            (NewgroundsArtExtractor   , root + "/art"),
            (NewgroundsAudioExtractor , root + "/audio"),
            (NewgroundsMoviesExtractor, root + "/movies"),
        )
        return self._dispatch_extractors(extractors, ("art",))


class NewgroundsFavoriteExtractor(NewgroundsExtractor):
    """Extractor for posts favorited by a newgrounds user"""
    subcategory = "favorite"
    directory_fmt = ("{category}", "{user}", "Favorites")
    pattern = (r"(?:https?://)?([^.]+)\.newgrounds\.com"
               r"/favorites(?!/following)(?:/(art|audio|movies))?/?")
    test = (
        ("https://tomfulp.newgrounds.com/favorites/art", {
            "range": "1-10",
            "count": ">= 10",
        }),
        ("https://tomfulp.newgrounds.com/favorites/audio"),
        ("https://tomfulp.newgrounds.com/favorites/movies"),
        ("https://tomfulp.newgrounds.com/favorites/"),
    )

    def __init__(self, match):
        NewgroundsExtractor.__init__(self, match)
        # optional favorites category ("art", "audio", "movies");
        # None when matching ".../favorites/"
        self.kind = match.group(2)

    def posts(self):
        """Yield favorited post URLs, for one or for all categories"""
        if self.kind:
            return self._pagination(self.kind)
        return itertools.chain.from_iterable(
            self._pagination(k) for k in ("art", "audio", "movies")
        )

    def _pagination(self, kind):
        """Yield favorite URLs from numbered favorites pages"""
        num = 1
        headers = {
            "Accept": "application/json, text/javascript, */*; q=0.01",
            "X-Requested-With": "XMLHttpRequest",
            "Referer": self.user_root,
        }

        while True:
            url = "{}/favorites/{}/{}".format(self.user_root, kind, num)
            response = self.request(url, headers=headers)
            # a redirect means there is no such page — stop
            if response.history:
                return

            favs = self._extract_favorites(response.text)
            yield from favs

            # fewer than 24 results: presumably the last page
            if len(favs) < 24:
                return
            num += 1

    def _extract_favorites(self, page):
        """Return the full post URLs linked from a favorites page"""
        return [
            self.root + path
            for path in text.extract_iter(
                page, 'href="https://www.newgrounds.com', '"')
        ]


class NewgroundsFollowingExtractor(NewgroundsFavoriteExtractor):
    """Extractor for a newgrounds user's favorited users"""
    subcategory = "following"
    pattern = r"(?:https?://)?([^.]+)\.newgrounds\.com/favorites/(following)"
    test = ("https://tomfulp.newgrounds.com/favorites/following", {
        "pattern": NewgroundsUserExtractor.pattern,
        "range": "76-125",
        "count": 50,
    })

    def items(self):
        """Enqueue one user-profile URL per followed user"""
        queue_data = {"_extractor": NewgroundsUserExtractor}
        for profile_url in self._pagination(self.kind):
            yield Message.Queue, profile_url, queue_data

    @staticmethod
    def _extract_favorites(page):
        """Collect profile URLs of followed users from a favorites page"""
        profiles = []
        for snippet in text.extract_iter(page, 'class="item-user', '"><img'):
            href = snippet.rpartition('"')[2]
            profiles.append(text.ensure_http_scheme(href))
        return profiles