# -*- coding: utf-8 -*-

# Copyright 2017-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://www.reddit.com/"""

from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache


class RedditExtractor(Extractor):
    """Base class for reddit extractors"""
    category = "reddit"
    directory_fmt = ("{category}", "{subreddit}")
    filename_fmt = "{id}{num:? //>02} {title[:220]}.{extension}"
    archive_fmt = "{filename}"
    cookies_domain = ".reddit.com"
    request_interval = 0.6

    def items(self):
        self.api = RedditAPI(self)
        match_submission = RedditSubmissionExtractor.pattern.match
        match_subreddit = RedditSubredditExtractor.pattern.match
        match_user = RedditUserExtractor.pattern.match

        parentdir = self.config("parent-directory")
        max_depth = self.config("recursion", 0)

        videos = self.config("videos", True)
        if videos:
            if videos == "ytdl":
                self._extract_video = self._extract_video_ytdl
            elif videos == "dash":
                self._extract_video = self._extract_video_dash
            videos = True

        submissions = self.submissions()
        visited = set()
        depth = 0

        while True:
            extra = []

            for submission, comments in submissions:
                urls = []

                if submission:
                    submission["date"] = text.parse_timestamp(
                        submission["created_utc"])
                    yield Message.Directory, submission
                    visited.add(submission["id"])
                    submission["num"] = 0

                    if "crosspost_parent_list" in submission:
                        try:
                            media = submission["crosspost_parent_list"][-1]
                        except Exception:
                            media = submission
                    else:
                        media = submission

                    url = media["url"]

                    if url and url.startswith((
                        "https://i.redd.it/",
                        "https://preview.redd.it/",
                    )):
                        text.nameext_from_url(url, submission)
                        yield Message.Url, url, submission

                    elif "gallery_data" in media:
                        for submission["num"], url in enumerate(
                                self._extract_gallery(media), 1):
                            text.nameext_from_url(url, submission)
                            yield Message.Url, url, submission

                    elif media["is_video"]:
                        if videos:
                            text.nameext_from_url(url, submission)
                            url = "ytdl:" + self._extract_video(media)
                            yield Message.Url, url, submission

                    elif not submission["is_self"]:
                        urls.append((url, submission))

                elif parentdir:
                    yield Message.Directory, comments[0]

                if self.api.comments:
                    if submission:
                        for url in text.extract_iter(
                                submission["selftext_html"] or "",
                                ' href="', '"'):
                            urls.append((url, submission))
                    for comment in comments:
                        for url in text.extract_iter(
                                comment["body_html"] or "", ' href="', '"'):
                            urls.append((url, comment))

                for url, data in urls:
                    if not url or url[0] == "#":
                        continue
                    if url[0] == "/":
                        url = "https://www.reddit.com" + url

                    match = match_submission(url)
                    if match:
                        extra.append(match.group(1))
                    elif not match_user(url) and not match_subreddit(url):
                        if "preview" in data:
                            data["_fallback"] = self._previews(data)
                        yield Message.Queue, text.unescape(url), data
                        if "_fallback" in data:
                            del data["_fallback"]

            if not extra or depth == max_depth:
                return
            depth += 1
            submissions = (
                self.api.submission(sid) for sid in extra
                if sid not in visited
            )

    def submissions(self):
        """Return an iterable containing all (submission, comments) tuples"""

    def _extract_gallery(self, submission):
        gallery = submission["gallery_data"]
        if gallery is None:
            self.log.warning("gallery %s: deleted", submission["id"])
            return

        meta = submission.get("media_metadata")
        if meta is None:
            self.log.warning("gallery %s: missing 'media_metadata'",
                             submission["id"])
            return

        for item in gallery["items"]:
            data = meta[item["media_id"]]
            if data["status"] != "valid" or "s" not in data:
                self.log.warning(
                    "gallery %s: skipping item %s ('status: %s')",
                    submission["id"], item["media_id"], data.get("status"))
                continue
            src = data["s"]
            url = src.get("u") or src.get("gif") or src.get("mp4")
            if url:
                yield url.partition("?")[0].replace("/preview.", "/i.", 1)
            else:
                self.log.error(
                    "gallery %s: unable to fetch download URL for item %s",
                    submission["id"], item["media_id"])
                self.log.debug(src)

    def _extract_video_ytdl(self, submission):
        return "https://www.reddit.com" + submission["permalink"]
    def _extract_video_dash(self, submission):
        submission["_ytdl_extra"] = {"title": submission["title"]}
        try:
            return (submission["secure_media"]["reddit_video"]["dash_url"] +
                    "#__youtubedl_smuggle=%7B%22to_generic%22%3A+1%7D")
        except Exception:
            return submission["url"]

    def _extract_video(self, submission):
        submission["_ytdl_extra"] = {"title": submission["title"]}
        return submission["url"]
    def _previews(self, post):
        try:
            for image in post["preview"]["images"]:
                yield image["source"]["url"]
        except Exception as exc:
            self.log.debug("%s: %s", exc.__class__.__name__, exc)


class RedditSubredditExtractor(RedditExtractor):
    """Extractor for URLs from subreddits on reddit.com"""
    subcategory = "subreddit"
    pattern = (r"(?:https?://)?(?:\w+\.)?reddit\.com"
               r"(/r/[^/?#]+(?:/([a-z]+))?)/?(?:\?([^#]*))?(?:$|#)")
    test = (
        ("https://www.reddit.com/r/lavaporn/", {
            "range": "1-20",
            "count": ">= 20",
        }),
        ("https://www.reddit.com/r/lavaporn/top/?sort=top&t=month"),
        ("https://old.reddit.com/r/lavaporn/"),
        ("https://np.reddit.com/r/lavaporn/"),
        ("https://m.reddit.com/r/lavaporn/"),
    )

    def __init__(self, match):
        self.subreddit, sub, params = match.groups()
        self.params = text.parse_query(params)
        if sub:
            self.subcategory += "-" + sub
        RedditExtractor.__init__(self, match)

    def submissions(self):
        return self.api.submissions_subreddit(self.subreddit, self.params)


class RedditHomeExtractor(RedditSubredditExtractor):
    """Extractor for submissions from your home feed on reddit.com"""
    subcategory = "home"
    pattern = (r"(?:https?://)?(?:\w+\.)?reddit\.com"
               r"((?:/([a-z]+))?)/?(?:\?([^#]*))?(?:$|#)")
    test = (
        ("https://www.reddit.com/", {
            "range": "1-20",
            "count": ">= 20",
        }),
        ("https://old.reddit.com/top/?sort=top&t=month"),
    )


class RedditUserExtractor(RedditExtractor):
    """Extractor for URLs from posts by a reddit user"""
    subcategory = "user"
    pattern = (r"(?:https?://)?(?:\w+\.)?reddit\.com/u(?:ser)?/"
               r"([^/?#]+(?:/([a-z]+))?)/?(?:\?([^#]*))?$")
    test = (
        ("https://www.reddit.com/user/username/", {
            "count": ">= 2",
        }),
        ("https://www.reddit.com/user/username/gilded/?sort=top&t=month"),
        ("https://old.reddit.com/user/username/"),
        ("https://www.reddit.com/u/username/"),
    )

    def __init__(self, match):
        self.user, sub, params = match.groups()
        self.params = text.parse_query(params)
        if sub:
            self.subcategory += "-" + sub
        RedditExtractor.__init__(self, match)

    def submissions(self):
        return self.api.submissions_user(self.user, self.params)


class RedditSubmissionExtractor(RedditExtractor):
    """Extractor for URLs from a submission on reddit.com"""
    subcategory = "submission"
    pattern = (r"(?:https?://)?(?:"
               r"(?:\w+\.)?reddit\.com/(?:(?:r|u|user)/[^/?#]+"
               r"/comments|gallery)|redd\.it)/([a-z0-9]+)")
    test = (
        ("https://www.reddit.com/r/lavaporn/comments/8cqhub/", {
            "pattern": r"https://c2.staticflickr.com/8/7272/\w+_k.jpg",
            "count": 1,
        }),
        ("https://www.reddit.com/r/lavaporn/comments/8cqhub/", {
            "options": (("comments", 500),),
            "pattern": r"https://",
            "count": 3,
        }),
        ("https://www.reddit.com/gallery/hrrh23", {
            "url": "25b91ede15459470274dd17291424b037ed8b0ae",
            "content": "1e7dde4ee7d5f4c4b45749abfd15b2dbfa27df3f",
            "count": 3,
        }),
        # video
        ("https://www.reddit.com/r/aww/comments/90bu6w/", {
            "pattern": r"ytdl:https://v.redd.it/gyh95hiqc0b11",
            "count": 1,
        }),
        # video (ytdl)
        ("https://www.reddit.com/r/aww/comments/90bu6w/", {
            "options": (("videos", "ytdl"),),
            "pattern": r"ytdl:https://www.reddit.com/r/aww/comments/90bu6w"
                       r"/heat_index_was_110_degrees_so_we_offered_him_a/",
            "count": 1,
        }),
        # video (dash)
        ("https://www.reddit.com/r/aww/comments/90bu6w/", {
            "options": (("videos", "dash"),),
            "pattern": r"ytdl:https://v.redd.it/gyh95hiqc0b11"
                       r"/DASHPlaylist.mpd\?a=",
            "count": 1,
        }),
        # deleted gallery (#953)
        ("https://www.reddit.com/gallery/icfgzv", {
            "count": 0,
        }),
        # animated gallery items (#955)
        ("https://www.reddit.com/r/araragi/comments/ib32hm", {
            "pattern": r"https://i\.redd\.it/\w+\.gif",
            "count": 2,
        }),
        # "failed" gallery item (#1127)
        ("https://www.reddit.com/r/cosplay/comments/jvwaqr", {
            "count": 1,
        }),
        # gallery with no 'media_metadata' (#2001)
        ("https://www.reddit.com/r/kpopfap/comments/qjj04q/", {
            "count": 0,
        }),
        # user page submission (#2301)
        ("https://www.reddit.com/user/TheSpiritTree/comments/srilyf/", {
            "pattern": r"https://i.redd.it/8fpgv17yqlh81.jpg",
            "count": 1,
        }),
        # cross-posted video (#887, #3586, #3976)
        ("https://www.reddit.com/r/kittengifs/comments/12m0b8d", {
            "pattern": r"ytdl:https://v\.redd\.it/cvabpjacrvta1",
        }),
        # preview.redd.it (#4470)
        ("https://www.reddit.com/r/europe/comments/pm4531/the_name_of/", {
            "pattern": r"https://preview.redd.it/u9ud4k6xaf271.jpg?auto=webp"
                       r"&s=19b1334cb4409111cda136c01f7b44c2c42bf9fb",
        }),
        ("https://old.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://np.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://m.reddit.com/r/lavaporn/comments/2a00np/"),
        ("https://redd.it/2a00np/"),
    )

    def __init__(self, match):
        RedditExtractor.__init__(self, match)
        self.submission_id = match.group(1)

    def submissions(self):
        return (self.api.submission(self.submission_id),)


class RedditImageExtractor(Extractor):
    """Extractor for reddit-hosted images"""
    category = "reddit"
    subcategory = "image"
    archive_fmt = "{filename}"
    pattern = (r"(?:https?://)?((?:i|preview)\.redd\.it|i\.reddituploads\.com)"
               r"/([^/?#]+)(\?[^#]*)?")
    test = (
        ("https://i.redd.it/upjtjcx2npzz.jpg", {
            "url": "0de614900feef103e580b632190458c0b62b641a",
            "content": "cc9a68cf286708d5ce23c68e79cd9cf7826db6a3",
        }),
        (("https://i.reddituploads.com/0f44f1b1fca2461f957c713d9592617d"
          "?fit=max&h=1536&w=1536&s=e96ce7846b3c8e1f921d2ce2671fb5e2"), {
            "url": "f24f25efcedaddeec802e46c60d77ef975dc52a5",
            "content": "541dbcc3ad77aa01ee21ca49843c5e382371fae7",
        }),
        # preview.redd.it -> i.redd.it
        (("https://preview.redd.it/00af44lpn0u51.jpg?width=960&crop=smart"
          "&auto=webp&v=enabled&s=dbca8ab84033f4a433772d9c15dbe0429c74e8ac"), {
            "pattern": r"^https://i\.redd\.it/00af44lpn0u51\.jpg$"
        }),
    )

    def __init__(self, match):
        Extractor.__init__(self, match)
        domain = match.group(1)
        self.path = match.group(2)
        if domain == "preview.redd.it":
            self.domain = "i.redd.it"
            self.query = ""
        else:
            self.domain = domain
            self.query = match.group(3) or ""

    def items(self):
        url = "https://{}/{}{}".format(self.domain, self.path, self.query)
        data = text.nameext_from_url(url)
        yield Message.Directory, data
        yield Message.Url, url, data


class RedditAPI():
    """Interface for the Reddit API

    Ref: https://www.reddit.com/dev/api/
    """
    CLIENT_ID = "6N9uN0krSDE-ig"
    USER_AGENT = "Python:gallery-dl:0.8.4 (by /u/mikf1)"

    def __init__(self, extractor):
        self.extractor = extractor
        self.log = extractor.log

        config = extractor.config
        self.comments = text.parse_int(config("comments", 0))
        self.morecomments = config("morecomments", False)

        client_id = config("client-id")
        if client_id is None:
            self.client_id = self.CLIENT_ID
            self.headers = {"User-Agent": self.USER_AGENT}
        else:
            self.client_id = client_id
            self.headers = {"User-Agent": config("user-agent")}

        if self.client_id == self.CLIENT_ID:
            client_id = self.client_id
            self._warn_429 = True
            kind = "default"
        else:
            client_id = client_id[:5] + "*" * (len(client_id)-5)
            self._warn_429 = False
            kind = "custom"

        self.log.debug(
            "Using %s API credentials (client-id %s)", kind, client_id)

        token = config("refresh-token")
        if token is None or token == "cache":
            key = "#" + self.client_id
            self.refresh_token = _refresh_token_cache(key)
        else:
            self.refresh_token = token

        if not self.refresh_token:
            # allow downloading from quarantined subreddits (#2180)
            extractor.cookies.set(
                "_options", '%7B%22pref_quarantine_optin%22%3A%20true%7D',
                domain=extractor.cookies_domain)

    def submission(self, submission_id):
        """Fetch the (submission, comments)-tuple for a submission id"""
        endpoint = "/comments/" + submission_id + "/.json"
        link_id = "t3_" + submission_id if self.morecomments else None
        submission, comments = self._call(endpoint, {"limit": self.comments})
        return (submission["data"]["children"][0]["data"],
                self._flatten(comments, link_id) if self.comments else ())

    def submissions_subreddit(self, subreddit, params):
        """Collect all (submission, comments)-tuples of a subreddit"""
        endpoint = subreddit + "/.json"
        params["limit"] = 100
        return self._pagination(endpoint, params)

    def submissions_user(self, user, params):
        """Collect all (submission, comments)-tuples posted by a user"""
        endpoint = "/user/" + user + "/.json"
        params["limit"] = 100
        return self._pagination(endpoint, params)
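
    # Additional comments are requested in batches of at most 100 ids per
    # '/api/morechildren' call.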
    def morechildren(self, link_id, children):
        """Load additional comments from a submission"""
        endpoint = "/api/morechildren"
        params = {"link_id": link_id, "api_type": "json"}
        index, done = 0, False
        while not done:
            if len(children) - index < 100:
                done = True
            params["children"] = ",".join(children[index:index + 100])
            index += 100

            data = self._call(endpoint, params)["json"]
            for thing in data["data"]["things"]:
                if thing["kind"] == "more":
                    children.extend(thing["data"]["children"])
                else:
                    yield thing["data"]

    def authenticate(self):
        """Authenticate the application by requesting an access token"""
        self.headers["Authorization"] = \
            self._authenticate_impl(self.refresh_token)

    @cache(maxage=3600, keyarg=1)
    def _authenticate_impl(self, refresh_token=None):
        """Actual authenticate implementation"""
        url = "https://www.reddit.com/api/v1/access_token"
        self.headers["Authorization"] = None

        if refresh_token:
            self.log.info("Refreshing private access token")
            data = {"grant_type": "refresh_token",
                    "refresh_token": refresh_token}
        else:
            self.log.info("Requesting public access token")
            data = {"grant_type": ("https://oauth.reddit.com/"
                                   "grants/installed_client"),
                    "device_id": "DO_NOT_TRACK_THIS_DEVICE"}

        response = self.extractor.request(
            url, method="POST", headers=self.headers,
            data=data, auth=(self.client_id, ""), fatal=False)
        data = response.json()

        if response.status_code != 200:
            self.log.debug("Server response: %s", data)
            raise exception.AuthenticationError('"{}: {}"'.format(
                data.get("error"), data.get("message")))
        return "Bearer " + data["access_token"]
    def _call(self, endpoint, params):
        url = "https://oauth.reddit.com" + endpoint
        params["raw_json"] = "1"

        while True:
            self.authenticate()
            response = self.extractor.request(
                url, params=params, headers=self.headers, fatal=None)

            remaining = response.headers.get("x-ratelimit-remaining")
            if remaining and float(remaining) < 2:
                if self._warn_429:
                    self._warn_429 = False
                    self.log.info(
                        "Register your own OAuth application and use its "
                        "credentials to prevent this error: "
                        "https://github.com/mikf/gallery-dl/blob/master"
                        "/docs/configuration.rst"
                        "#extractorredditclient-id--user-agent")
                self.extractor.wait(
                    seconds=response.headers["x-ratelimit-reset"])
                continue

            try:
                data = response.json()
            except ValueError:
                raise exception.StopExtraction(text.remove_html(response.text))

            if "error" in data:
                if data["error"] == 403:
                    raise exception.AuthorizationError()
                if data["error"] == 404:
                    raise exception.NotFoundError()
                self.log.debug(data)
                raise exception.StopExtraction(data.get("message"))
            return data
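
    # Iterate over a paginated listing; the 'id-min'/'id-max' and date limits
    # from the extractor config restrict which posts are yielded.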
    def _pagination(self, endpoint, params):
        id_min = self._parse_id("id-min", 0)
        id_max = self._parse_id("id-max", float("inf"))
        if id_max == 2147483647:
            self.log.debug("Ignoring 'id-max' setting \"zik0zj\"")
            id_max = float("inf")
        date_min, date_max = self.extractor._get_date_min_max(0, 253402210800)

        while True:
            data = self._call(endpoint, params)["data"]

            for child in data["children"]:
                kind = child["kind"]
                post = child["data"]

                if (date_min <= post["created_utc"] <= date_max and
                        id_min <= self._decode(post["id"]) <= id_max):

                    if kind == "t3":
                        if post["num_comments"] and self.comments:
                            try:
                                yield self.submission(post["id"])
                            except exception.AuthorizationError:
                                pass
                        else:
                            yield post, ()

                    elif kind == "t1" and self.comments:
                        yield None, (post,)

            if not data["after"]:
                return
            params["after"] = data["after"]

    def _flatten(self, comments, link_id=None):
        extra = []
        queue = comments["data"]["children"]
        while queue:
            comment = queue.pop(0)
            if comment["kind"] == "more":
                if link_id:
                    extra.extend(comment["data"]["children"])
                continue
            comment = comment["data"]
            yield comment
            if comment["replies"]:
                queue += comment["replies"]["data"]["children"]
        if link_id and extra:
            yield from self.morechildren(link_id, extra)

    def _parse_id(self, key, default):
        sid = self.extractor.config(key)
        return self._decode(sid.rpartition("_")[2].lower()) if sid else default
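
    # Reddit ids are base-36 strings; "zik0zj" decodes to 2**31 - 1.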
    @staticmethod
    def _decode(sid):
        return util.bdecode(sid, "0123456789abcdefghijklmnopqrstuvwxyz")
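

# A value starting with "#" is a cache key derived from the client-id,
# not an actual refresh token.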
@cache(maxage=100*365*24*3600, keyarg=0)
def _refresh_token_cache(token):
    if token and token[0] == "#":
        return None
    return token