# -*- coding: utf-8 -*-

# Copyright 2016-2023 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://twitter.com/"""

from .common import Extractor, Message
from .. import text, util, exception
from ..cache import cache, memcache
import itertools
import json
import re

BASE_PATTERN = (r"(?:https?://)?(?:www\.|mobile\.)?"
                r"(?:(?:[fv]x)?twitter|(?:fixup)?x)\.com")


class TwitterExtractor(Extractor):
    """Base class for Twitter extractors"""
    category = "twitter"
    directory_fmt = ("{category}", "{user[name]}")
    filename_fmt = "{tweet_id}_{num}.{extension}"
    archive_fmt = "{tweet_id}_{retweet_id}_{num}"
    cookies_domain = ".twitter.com"
    cookies_names = ("auth_token",)
    root = "https://twitter.com"
    browser = "firefox"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.user = match.group(1)

    def _init(self):
        self.textonly = self.config("text-tweets", False)
        self.retweets = self.config("retweets", False)
        self.replies = self.config("replies", True)
        self.twitpic = self.config("twitpic", False)
        self.pinned = self.config("pinned", False)
        self.quoted = self.config("quoted", False)
        self.videos = self.config("videos", True)
        self.cards = self.config("cards", False)
        self.ads = self.config("ads", False)
        self.cards_blacklist = self.config("cards-blacklist")

        if not self.config("transform", True):
            self._transform_user = util.identity
            self._transform_tweet = util.identity
        self._user = None
        self._user_obj = None
        self._user_cache = {}
        self._init_sizes()

    def _init_sizes(self):
        size = self.config("size")
        if size is None:
            self._size_image = "orig"
            self._size_fallback = ("4096x4096", "large", "medium", "small")
        else:
            if isinstance(size, str):
                size = size.split(",")
            self._size_image = size[0]
            self._size_fallback = size[1:]
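        # For example, a config value of size = "large,medium" (a string or
        # a list both work) results in _size_image == "large" with
        # _size_fallback == ["medium"].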

    def items(self):
        self.login()
        self.api = TwitterAPI(self)
        metadata = self.metadata()

        if self.config("expand"):
            tweets = self._expand_tweets(self.tweets())
            self.tweets = lambda: tweets

        if self.config("unique", True):
            seen_tweets = set()
        else:
            seen_tweets = None

        if self.twitpic:
            self._find_twitpic = re.compile(
                r"https?(://twitpic\.com/(?!photos/)\w+)").findall

        for tweet in self.tweets():

            if "legacy" in tweet:
                data = tweet["legacy"]
            else:
                data = tweet

            if not self.retweets and "retweeted_status_id_str" in data:
                self.log.debug("Skipping %s (retweet)", data["id_str"])
                continue
            if not self.quoted and "quoted_by_id_str" in data:
                self.log.debug("Skipping %s (quoted tweet)", data["id_str"])
                continue
            if "in_reply_to_user_id_str" in data and (
                not self.replies or (
                    self.replies == "self" and
                    data["user_id_str"] !=
                    (self._user_obj["rest_id"] if self._user else
                     data["in_reply_to_user_id_str"])
                )
            ):
                self.log.debug("Skipping %s (reply)", data["id_str"])
                continue

            if seen_tweets is not None:
                if data["id_str"] in seen_tweets:
                    self.log.debug(
                        "Skipping %s (previously seen)", data["id_str"])
                    continue
                seen_tweets.add(data["id_str"])

            if "withheld_scope" in data:
                txt = data.get("full_text") or data.get("text") or ""
                self.log.warning("'%s' (%s)", txt, data["id_str"])

            files = []
            if "extended_entities" in data:
                self._extract_media(
                    data, data["extended_entities"]["media"], files)
            if "card" in tweet and self.cards:
                self._extract_card(tweet, files)
            if self.twitpic:
                self._extract_twitpic(data, files)
            if not files and not self.textonly:
                continue

            tdata = self._transform_tweet(tweet)
            tdata.update(metadata)
            tdata["count"] = len(files)
            yield Message.Directory, tdata
            for tdata["num"], file in enumerate(files, 1):
                file.update(tdata)
                url = file.pop("url")
                if "extension" not in file:
                    text.nameext_from_url(url, file)
                yield Message.Url, url, file

    def _extract_media(self, tweet, entities, files):
        for media in entities:
            descr = media.get("ext_alt_text")
            width = media["original_info"].get("width", 0)
            height = media["original_info"].get("height", 0)

            if "video_info" in media:
                if self.videos == "ytdl":
                    files.append({
                        "url": "ytdl:{}/i/web/status/{}".format(
                            self.root, tweet["id_str"]),
                        "width"      : width,
                        "height"     : height,
                        "extension"  : None,
                        "description": descr,
                    })
                elif self.videos:
                    video_info = media["video_info"]
                    variant = max(
                        video_info["variants"],
                        key=lambda v: v.get("bitrate", 0),
                    )
                    files.append({
                        "url"        : variant["url"],
                        "width"      : width,
                        "height"     : height,
                        "bitrate"    : variant.get("bitrate", 0),
                        "duration"   : video_info.get(
                            "duration_millis", 0) / 1000,
                        "description": descr,
                    })
            elif "media_url_https" in media:
                url = media["media_url_https"]
                if url[-4] == ".":
                    # URL ends in a 3-letter extension like ".jpg";
                    # convert it to "?format=jpg&name=" query style
                    base, _, fmt = url.rpartition(".")
                    base += "?format=" + fmt + "&name="
                else:
                    # URL already carries a "...&name=" size parameter
                    base = url.rpartition("=")[0] + "="
                files.append(text.nameext_from_url(url, {
                    "url"        : base + self._size_image,
                    "width"      : width,
                    "height"     : height,
                    "_fallback"  : self._image_fallback(base),
                    "description": descr,
                }))
            else:
                files.append({"url": media["media_url"]})

    def _image_fallback(self, base):
        for fmt in self._size_fallback:
            yield base + fmt
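    # With the default sizes, _image_fallback for a base such as
    # "https://pbs.twimg.com/media/XYZ?format=jpg&name=" (illustrative)
    # yields the "4096x4096", "large", "medium", and "small" variants,
    # tried in that order if the primary download fails.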

    def _extract_card(self, tweet, files):
        card = tweet["card"]
        if "legacy" in card:
            card = card["legacy"]

        name = card["name"].rpartition(":")[2]
        bvals = card["binding_values"]
        if isinstance(bvals, list):
            bvals = {bval["key"]: bval["value"]
                     for bval in card["binding_values"]}

        cbl = self.cards_blacklist
        if cbl:
            if name in cbl:
                return
            if "vanity_url" in bvals:
                domain = bvals["vanity_url"]["string_value"]
                if domain in cbl or name + ":" + domain in cbl:
                    return

        if name in ("summary", "summary_large_image"):
            for prefix in ("photo_image_full_size_",
                           "summary_photo_image_",
                           "thumbnail_image_"):
                for size in ("original", "x_large", "large", "small"):
                    key = prefix + size
                    if key in bvals:
                        value = bvals[key].get("image_value")
                        if value and "url" in value:
                            base, sep, size = value["url"].rpartition(
                                "&name=")
                            if sep:
                                base += sep
                                value["url"] = base + self._size_image
                                value["_fallback"] = \
                                    self._image_fallback(base)
                            files.append(value)
                            return
        elif name == "unified_card":
            data = util.json_loads(bvals["unified_card"]["string_value"])
            self._extract_media(tweet, data["media_entities"].values(), files)
            return

        if self.cards == "ytdl":
            tweet_id = tweet.get("rest_id") or tweet["id_str"]
            url = "ytdl:{}/i/web/status/{}".format(self.root, tweet_id)
            files.append({"url": url})

    def _extract_twitpic(self, tweet, files):
        urls = {}

        # collect URLs from entities
        for url in tweet["entities"].get("urls") or ():
            url = url["expanded_url"]
            if "//twitpic.com/" not in url or "/photos/" in url:
                continue
            if url.startswith("http:"):
                url = "https" + url[4:]
            urls[url] = None

        # collect URLs from text
        for url in self._find_twitpic(
                tweet.get("full_text") or tweet.get("text") or ""):
            urls["https" + url] = None

        # extract actual URLs
        for url in urls:
            response = self.request(url, fatal=False)
            if response.status_code >= 400:
                continue
            url = text.extr(response.text, 'name="twitter:image" value="', '"')
            if url:
                files.append({"url": url})

    def _transform_tweet(self, tweet):
        if "author" in tweet:
            author = tweet["author"]
        elif "core" in tweet:
            author = tweet["core"]["user_results"]["result"]
        else:
            author = tweet["user"]
        author = self._transform_user(author)

        if "legacy" in tweet:
            legacy = tweet["legacy"]
        else:
            legacy = tweet
        tget = legacy.get
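
        # Tweet IDs are "snowflakes": for IDs >= 300000000000000, bits 22+
        # encode milliseconds since the Twitter epoch 1288834974657
        # (2010-11-04), so the creation time follows directly from the ID:
        #   (tweet_id >> 22) + 1288834974657  ->  Unix time in milliseconds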
        tweet_id = int(legacy["id_str"])
        if tweet_id >= 300000000000000:
            date = text.parse_timestamp(
                ((tweet_id >> 22) + 1288834974657) // 1000)
        else:
            try:
                date = text.parse_datetime(
                    legacy["created_at"], "%a %b %d %H:%M:%S %z %Y")
            except Exception:
                date = util.NONE

        tdata = {
            "tweet_id"      : tweet_id,
            "retweet_id"    : text.parse_int(
                tget("retweeted_status_id_str")),
            "quote_id"      : text.parse_int(
                tget("quoted_by_id_str")),
            "reply_id"      : text.parse_int(
                tget("in_reply_to_status_id_str")),
            "conversation_id": text.parse_int(
                tget("conversation_id_str")),
            "date"          : date,
            "author"        : author,
            "user"          : self._user or author,
            "lang"          : legacy["lang"],
            "source"        : text.extr(tweet["source"], ">", "<"),
            "sensitive"     : tget("possibly_sensitive"),
            "favorite_count": tget("favorite_count"),
            "quote_count"   : tget("quote_count"),
            "reply_count"   : tget("reply_count"),
            "retweet_count" : tget("retweet_count"),
        }

        if "note_tweet" in tweet:
            note = tweet["note_tweet"]["note_tweet_results"]["result"]
            content = note["text"]
            entities = note["entity_set"]
        else:
            content = tget("full_text") or tget("text") or ""
            entities = legacy["entities"]

        hashtags = entities.get("hashtags")
        if hashtags:
            tdata["hashtags"] = [t["text"] for t in hashtags]

        mentions = entities.get("user_mentions")
        if mentions:
            tdata["mentions"] = [{
                "id": text.parse_int(u["id_str"]),
                "name": u["screen_name"],
                "nick": u["name"],
            } for u in mentions]

        content = text.unescape(content)
        urls = entities.get("urls")
        if urls:
            for url in urls:
                content = content.replace(url["url"], url["expanded_url"])
        txt, _, tco = content.rpartition(" ")
        tdata["content"] = txt if tco.startswith("https://t.co/") else content

        if "birdwatch_pivot" in tweet:
            try:
                tdata["birdwatch"] = \
                    tweet["birdwatch_pivot"]["subtitle"]["text"]
            except KeyError:
                self.log.debug("Unable to extract 'birdwatch' note from %s",
                               tweet["birdwatch_pivot"])
        if "in_reply_to_screen_name" in legacy:
            tdata["reply_to"] = legacy["in_reply_to_screen_name"]
        if "quoted_by" in legacy:
            tdata["quote_by"] = legacy["quoted_by"]
        if tdata["retweet_id"]:
            tdata["content"] = "RT @{}: {}".format(
                author["name"], tdata["content"])
            tdata["date_original"] = text.parse_timestamp(
                ((tdata["retweet_id"] >> 22) + 1288834974657) // 1000)

        return tdata

    def _transform_user(self, user):
        try:
            uid = user.get("rest_id") or user["id_str"]
        except KeyError:
            # private/invalid user (#4349)
            return {}

        try:
            return self._user_cache[uid]
        except KeyError:
            pass

        if "legacy" in user:
            user = user["legacy"]

        uget = user.get
        if uget("withheld_scope"):
            self.log.warning("'%s'", uget("description"))

        entities = user["entities"]
        self._user_cache[uid] = udata = {
            "id"              : text.parse_int(uid),
            "name"            : user["screen_name"],
            "nick"            : user["name"],
            "location"        : uget("location"),
            "date"            : text.parse_datetime(
                uget("created_at"), "%a %b %d %H:%M:%S %z %Y"),
            "verified"        : uget("verified", False),
            "protected"       : uget("protected", False),
            "profile_banner"  : uget("profile_banner_url", ""),
            "profile_image"   : uget(
                "profile_image_url_https", "").replace("_normal.", "."),
            "favourites_count": uget("favourites_count"),
            "followers_count" : uget("followers_count"),
            "friends_count"   : uget("friends_count"),
            "listed_count"    : uget("listed_count"),
            "media_count"     : uget("media_count"),
            "statuses_count"  : uget("statuses_count"),
        }

        descr = user["description"]
        urls = entities["description"].get("urls")
        if urls:
            for url in urls:
                descr = descr.replace(url["url"], url["expanded_url"])
        udata["description"] = descr

        if "url" in entities:
            url = entities["url"]["urls"][0]
            udata["url"] = url.get("expanded_url") or url.get("url")

        return udata

    def _assign_user(self, user):
        self._user_obj = user
        self._user = self._transform_user(user)

    def _users_result(self, users):
        userfmt = self.config("users")
        if not userfmt or userfmt == "user":
            cls = TwitterUserExtractor
            fmt = (self.root + "/i/user/{rest_id}").format_map
        elif userfmt == "timeline":
            cls = TwitterTimelineExtractor
            fmt = (self.root + "/id:{rest_id}/timeline").format_map
        elif userfmt == "media":
            cls = TwitterMediaExtractor
            fmt = (self.root + "/id:{rest_id}/media").format_map
        elif userfmt == "tweets":
            cls = TwitterTweetsExtractor
            fmt = (self.root + "/id:{rest_id}/tweets").format_map
        else:
            cls = None
            fmt = userfmt.format_map

        for user in users:
            user["_extractor"] = cls
            yield Message.Queue, fmt(user), user
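
    # A custom "users" format string is applied to each user dict via
    # str.format_map(); e.g. the (illustrative) value
    # "https://twitter.com/i/user/{rest_id}" would queue one such URL
    # per user result.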

    def _expand_tweets(self, tweets):
        seen = set()
        for tweet in tweets:
            obj = tweet["legacy"] if "legacy" in tweet else tweet
            cid = obj.get("conversation_id_str")
            if not cid:
                tid = obj["id_str"]
                self.log.warning(
                    "Unable to expand %s (no 'conversation_id')", tid)
                continue
            if cid in seen:
                self.log.debug(
                    "Skipping expansion of %s (previously seen)", cid)
                continue
            seen.add(cid)
            try:
                yield from self.api.tweet_detail(cid)
            except Exception:
                yield tweet

    def _make_tweet(self, user, url, id_str):
        return {
            "id_str": id_str,
            "lang": None,
            "user": user,
            "source": "><",
            "entities": {},
            "extended_entities": {
                "media": [
                    {
                        "original_info": {},
                        "media_url": url,
                    },
                ],
            },
        }
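
    # _make_tweet() synthesizes a minimal tweet-like dict so avatar and
    # banner images can reuse the regular _transform_tweet() /
    # _extract_media() pipeline; its "source" value "><" makes
    # text.extr(..., ">", "<") return an empty string.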

    def metadata(self):
        """Return general metadata"""
        return {}

    def tweets(self):
        """Yield all relevant tweet objects"""

    def login(self):
        if self.cookies_check(self.cookies_names):
            return

        username, password = self._get_auth_info()
        if username:
            self.cookies_update(_login_impl(self, username, password))


class TwitterUserExtractor(TwitterExtractor):
    """Extractor for a Twitter user"""
    subcategory = "user"
    pattern = (BASE_PATTERN + r"/(?!search)(?:([^/?#]+)/?(?:$|[?#])"
               r"|i(?:/user/|ntent/user\?user_id=)(\d+))")
    example = "https://twitter.com/USER"

    def __init__(self, match):
        TwitterExtractor.__init__(self, match)
        user_id = match.group(2)
        if user_id:
            self.user = "id:" + user_id

    def initialize(self):
        pass

    def items(self):
        base = "{}/{}/".format(self.root, self.user)
        return self._dispatch_extractors((
            (TwitterAvatarExtractor    , base + "photo"),
            (TwitterBackgroundExtractor, base + "header_photo"),
            (TwitterTimelineExtractor  , base + "timeline"),
            (TwitterTweetsExtractor    , base + "tweets"),
            (TwitterMediaExtractor     , base + "media"),
            (TwitterRepliesExtractor   , base + "with_replies"),
            (TwitterLikesExtractor     , base + "likes"),
        ), ("timeline",))


class TwitterTimelineExtractor(TwitterExtractor):
    """Extractor for a Twitter user timeline"""
    subcategory = "timeline"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/timeline(?!\w)"
    example = "https://twitter.com/USER/timeline"

    def tweets(self):
        # yield initial batch of (media) tweets
        tweet = None
        for tweet in self._select_tweet_source()(self.user):
            yield tweet
        if tweet is None:
            return

        # build search query
        query = "from:{} max_id:{}".format(
            self._user["name"], tweet["rest_id"])
        if self.retweets:
            query += " include:retweets include:nativeretweets"

        if not self.textonly:
            # try to search for media-only tweets
            tweet = None
            for tweet in self.api.search_timeline(query + " filter:links"):
                yield tweet
            if tweet is not None:
                return

        # yield unfiltered search results
        yield from self.api.search_timeline(query)

    def _select_tweet_source(self):
        strategy = self.config("strategy")
        if strategy is None or strategy == "auto":
            if self.retweets or self.textonly:
                return self.api.user_tweets
            else:
                return self.api.user_media
        if strategy == "tweets":
            return self.api.user_tweets
        if strategy == "media":
            return self.api.user_media
        if strategy == "with_replies":
            return self.api.user_tweets_and_replies
        raise exception.StopExtraction("Invalid strategy '%s'", strategy)
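
    # Accepted "strategy" config values are thus "auto" (the default),
    # "tweets", "media", and "with_replies"; for example, setting
    # "strategy": "media" always paginates the user's Media timeline.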


class TwitterTweetsExtractor(TwitterExtractor):
    """Extractor for Tweets from a user's Tweets timeline"""
    subcategory = "tweets"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/tweets(?!\w)"
    example = "https://twitter.com/USER/tweets"

    def tweets(self):
        return self.api.user_tweets(self.user)


class TwitterRepliesExtractor(TwitterExtractor):
    """Extractor for Tweets from a user's timeline including replies"""
    subcategory = "replies"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/with_replies(?!\w)"
    example = "https://twitter.com/USER/with_replies"

    def tweets(self):
        return self.api.user_tweets_and_replies(self.user)


class TwitterMediaExtractor(TwitterExtractor):
    """Extractor for Tweets from a user's Media timeline"""
    subcategory = "media"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/media(?!\w)"
    example = "https://twitter.com/USER/media"

    def tweets(self):
        return self.api.user_media(self.user)


class TwitterLikesExtractor(TwitterExtractor):
    """Extractor for liked tweets"""
    subcategory = "likes"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/likes(?!\w)"
    example = "https://twitter.com/USER/likes"

    def metadata(self):
        return {"user_likes": self.user}

    def tweets(self):
        return self.api.user_likes(self.user)


class TwitterBookmarkExtractor(TwitterExtractor):
    """Extractor for bookmarked tweets"""
    subcategory = "bookmark"
    pattern = BASE_PATTERN + r"/i/bookmarks()"
    example = "https://twitter.com/i/bookmarks"

    def tweets(self):
        return self.api.user_bookmarks()

    def _transform_tweet(self, tweet):
        tdata = TwitterExtractor._transform_tweet(self, tweet)
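        # "sortIndex" appears to embed a millisecond timestamp in its
        # upper bits (shifted left by 20); shifting it back recovers the
        # approximate time the bookmark was added.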
        tdata["date_bookmarked"] = text.parse_timestamp(
            (int(tweet["sortIndex"] or 0) >> 20) // 1000)
        return tdata


class TwitterListExtractor(TwitterExtractor):
    """Extractor for Twitter lists"""
    subcategory = "list"
    pattern = BASE_PATTERN + r"/i/lists/(\d+)/?$"
    example = "https://twitter.com/i/lists/12345"

    def tweets(self):
        return self.api.list_latest_tweets_timeline(self.user)


class TwitterListMembersExtractor(TwitterExtractor):
    """Extractor for members of a Twitter list"""
    subcategory = "list-members"
    pattern = BASE_PATTERN + r"/i/lists/(\d+)/members"
    example = "https://twitter.com/i/lists/12345/members"

    def items(self):
        self.login()
        return self._users_result(TwitterAPI(self).list_members(self.user))


class TwitterFollowingExtractor(TwitterExtractor):
    """Extractor for followed users"""
    subcategory = "following"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/following(?!\w)"
    example = "https://twitter.com/USER/following"

    def items(self):
        self.login()
        return self._users_result(TwitterAPI(self).user_following(self.user))


class TwitterSearchExtractor(TwitterExtractor):
    """Extractor for Twitter search results"""
    subcategory = "search"
    pattern = BASE_PATTERN + r"/search/?\?(?:[^&#]+&)*q=([^&#]+)"
    example = "https://twitter.com/search?q=QUERY"

    def metadata(self):
        return {"search": text.unquote(self.user)}

    def tweets(self):
        query = text.unquote(self.user.replace("+", " "))

        user = None
        for item in query.split():
            item = item.strip("()")
            if item.startswith("from:"):
                if user:
                    user = None
                    break
                else:
                    user = item[5:]

        if user is not None:
            try:
                self._assign_user(self.api.user_by_screen_name(user))
            except KeyError:
                pass

        return self.api.search_timeline(query)


class TwitterHashtagExtractor(TwitterExtractor):
    """Extractor for Twitter hashtags"""
    subcategory = "hashtag"
    pattern = BASE_PATTERN + r"/hashtag/([^/?#]+)"
    example = "https://twitter.com/hashtag/NAME"

    def items(self):
        url = "{}/search?q=%23{}".format(self.root, self.user)
        data = {"_extractor": TwitterSearchExtractor}
        yield Message.Queue, url, data


class TwitterCommunityExtractor(TwitterExtractor):
    """Extractor for a Twitter community"""
    subcategory = "community"
    pattern = BASE_PATTERN + r"/i/communities/(\d+)"
    example = "https://twitter.com/i/communities/12345"

    def tweets(self):
        if self.textonly:
            return self.api.community_tweets_timeline(self.user)
        return self.api.community_media_timeline(self.user)


class TwitterCommunitiesExtractor(TwitterExtractor):
    """Extractor for followed Twitter communities"""
    subcategory = "communities"
    pattern = BASE_PATTERN + r"/([^/?#]+)/communities/?$"
    example = "https://twitter.com/i/communities"

    def tweets(self):
        return self.api.communities_main_page_timeline(self.user)


class TwitterEventExtractor(TwitterExtractor):
    """Extractor for Tweets from a Twitter Event"""
    subcategory = "event"
    directory_fmt = ("{category}", "Events",
                     "{event[id]} {event[short_title]}")
    pattern = BASE_PATTERN + r"/i/events/(\d+)"
    example = "https://twitter.com/i/events/12345"

    def metadata(self):
        return {"event": self.api.live_event(self.user)}

    def tweets(self):
        return self.api.live_event_timeline(self.user)


class TwitterTweetExtractor(TwitterExtractor):
    """Extractor for individual tweets"""
    subcategory = "tweet"
    pattern = BASE_PATTERN + r"/([^/?#]+|i/web)/status/(\d+)/?(?:$|[?#])"
    example = "https://twitter.com/USER/status/12345"

    def __init__(self, match):
        TwitterExtractor.__init__(self, match)
        self.tweet_id = match.group(2)

    def tweets(self):
        conversations = self.config("conversations")
        if conversations:
            self._accessible = (conversations == "accessible")
            return self._tweets_conversation(self.tweet_id)

        endpoint = self.config("tweet-endpoint")
        if endpoint == "detail" or endpoint in (None, "auto") and \
                self.api.headers["x-twitter-auth-type"]:
            return self._tweets_detail(self.tweet_id)

        return self._tweets_single(self.tweet_id)

    def _tweets_single(self, tweet_id):
        tweet = self.api.tweet_result_by_rest_id(tweet_id)

        try:
            self._assign_user(tweet["core"]["user_results"]["result"])
        except KeyError:
            raise exception.StopExtraction(
                "'%s'", tweet.get("reason") or "Unavailable")

        yield tweet

        if not self.quoted:
            return

        while True:
            tweet_id = tweet["legacy"].get("quoted_status_id_str")
            if not tweet_id:
                break
            tweet = self.api.tweet_result_by_rest_id(tweet_id)
            tweet["legacy"]["quoted_by_id_str"] = tweet_id
            yield tweet

    def _tweets_detail(self, tweet_id):
        tweets = []

        for tweet in self.api.tweet_detail(tweet_id):
            if tweet["rest_id"] == tweet_id or \
                    tweet.get("_retweet_id_str") == tweet_id:
                if self._user_obj is None:
                    self._assign_user(tweet["core"]["user_results"]["result"])
                tweets.append(tweet)

                tweet_id = tweet["legacy"].get("quoted_status_id_str")
                if not tweet_id:
                    break

        return tweets

    def _tweets_conversation(self, tweet_id):
        tweets = self.api.tweet_detail(tweet_id)
        buffer = []

        for tweet in tweets:
            buffer.append(tweet)
            if tweet["rest_id"] == tweet_id or \
                    tweet.get("_retweet_id_str") == tweet_id:
                self._assign_user(tweet["core"]["user_results"]["result"])
                break
        else:
            # initial Tweet not accessible
            if self._accessible:
                return ()
            return buffer

        return itertools.chain(buffer, tweets)


class TwitterQuotesExtractor(TwitterExtractor):
    """Extractor for quotes of a Tweet"""
    subcategory = "quotes"
    pattern = BASE_PATTERN + r"/(?:[^/?#]+|i/web)/status/(\d+)/quotes"
    example = "https://twitter.com/USER/status/12345/quotes"

    def items(self):
        url = "{}/search?q=quoted_tweet_id:{}".format(self.root, self.user)
        data = {"_extractor": TwitterSearchExtractor}
        yield Message.Queue, url, data


class TwitterAvatarExtractor(TwitterExtractor):
    subcategory = "avatar"
    filename_fmt = "avatar {date}.{extension}"
    archive_fmt = "AV_{user[id]}_{date}"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/photo"
    example = "https://twitter.com/USER/photo"

    def tweets(self):
        self.api._user_id_by_screen_name(self.user)
        user = self._user_obj
        url = user["legacy"]["profile_image_url_https"]

        if url == ("https://abs.twimg.com/sticky"
                   "/default_profile_images/default_profile_normal.png"):
            return ()

        url = url.replace("_normal.", ".")
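        # the media ID is the second-to-last path segment of
        # ".../profile_images/<media_id>/<filename>"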
        id_str = url.rsplit("/", 2)[1]

        return (self._make_tweet(user, url, id_str),)


class TwitterBackgroundExtractor(TwitterExtractor):
    subcategory = "background"
    filename_fmt = "background {date}.{extension}"
    archive_fmt = "BG_{user[id]}_{date}"
    pattern = BASE_PATTERN + r"/(?!search)([^/?#]+)/header_photo"
    example = "https://twitter.com/USER/header_photo"

    def tweets(self):
        self.api._user_id_by_screen_name(self.user)
        user = self._user_obj

        try:
            url = user["legacy"]["profile_banner_url"]
            _, timestamp = url.rsplit("/", 1)
        except (KeyError, ValueError):
            return ()
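
        # Banner URLs end in a Unix timestamp in seconds; inverting the
        # snowflake formula (seconds -> milliseconds, minus the Twitter
        # epoch 1288834974657, shifted into the upper bits) produces a
        # pseudo tweet ID whose derived "date" matches the upload time.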
        id_str = str((int(timestamp) * 1000 - 1288834974657) << 22)
        return (self._make_tweet(user, url, id_str),)


class TwitterImageExtractor(Extractor):
    category = "twitter"
    subcategory = "image"
    pattern = r"https?://pbs\.twimg\.com/media/([\w-]+)(?:\?format=|\.)(\w+)"
    example = "https://pbs.twimg.com/media/ABCDE?format=jpg&name=orig"

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.id, self.fmt = match.groups()
        TwitterExtractor._init_sizes(self)

    def items(self):
        base = "https://pbs.twimg.com/media/{}?format={}&name=".format(
            self.id, self.fmt)

        data = {
            "filename": self.id,
            "extension": self.fmt,
            "_fallback": TwitterExtractor._image_fallback(self, base),
        }

        yield Message.Directory, data
        yield Message.Url, base + self._size_image, data


class TwitterAPI():

    def __init__(self, extractor):
        self.extractor = extractor
        self.log = extractor.log

        self.root = "https://twitter.com/i/api"
        self._nsfw_warning = True
        self._json_dumps = json.JSONEncoder(separators=(",", ":")).encode

        cookies = extractor.cookies
        cookies_domain = extractor.cookies_domain
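
        # Twitter's web API uses a double-submit CSRF scheme: the "ct0"
        # cookie must match the "x-csrf-token" request header, so the token
        # is either read from an existing cookie or generated locally and
        # then stored in both places.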
        csrf = extractor.config("csrf")
        if csrf is None or csrf == "cookies":
            csrf_token = cookies.get("ct0", domain=cookies_domain)
        else:
            csrf_token = None
        if not csrf_token:
            csrf_token = util.generate_token()
            cookies.set("ct0", csrf_token, domain=cookies_domain)

        auth_token = cookies.get("auth_token", domain=cookies_domain)

        self.headers = {
            "Accept": "*/*",
            "Referer": "https://twitter.com/",
            "content-type": "application/json",
            "x-guest-token": None,
            "x-twitter-auth-type": "OAuth2Session" if auth_token else None,
            "x-csrf-token": csrf_token,
            "x-twitter-client-language": "en",
            "x-twitter-active-user": "yes",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejR"
                             "COuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu"
                             "4FA33AGWWjCpTnA",
        }
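        # (This Bearer value is the publicly known token embedded in
        # Twitter's own web client, not an account credential.)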
        self.params = {
            "include_profile_interstitial_type": "1",
            "include_blocking": "1",
            "include_blocked_by": "1",
            "include_followed_by": "1",
            "include_want_retweets": "1",
            "include_mute_edge": "1",
            "include_can_dm": "1",
            "include_can_media_tag": "1",
            "include_ext_has_nft_avatar": "1",
            "include_ext_is_blue_verified": "1",
            "include_ext_verified_type": "1",
            "skip_status": "1",
            "cards_platform": "Web-12",
            "include_cards": "1",
            "include_ext_alt_text": "true",
            "include_ext_limited_action_results": "false",
            "include_quote_count": "true",
            "include_reply_count": "1",
            "tweet_mode": "extended",
            "include_ext_collab_control": "true",
            "include_ext_views": "true",
            "include_entities": "true",
            "include_user_entities": "true",
            "include_ext_media_color": "true",
            "include_ext_media_availability": "true",
            "include_ext_sensitive_media_warning": "true",
            "include_ext_trusted_friends_metadata": "true",
            "send_error_codes": "true",
            "simple_quoted_tweet": "true",
            "q": None,
            "count": "100",
            "query_source": None,
            "cursor": None,
            "pc": None,
            "spelling_corrections": None,
            "include_ext_edit_control": "true",
            "ext": "mediaStats,highlightedLabel,hasNftAvatar,voiceInfo,"
                   "enrichments,superFollowMetadata,unmentionInfo,editControl,"
                   "collab_control,vibe",
        }
        self.features = {
            "hidden_profile_likes_enabled": True,
            "hidden_profile_subscriptions_enabled": True,
            "responsive_web_graphql_exclude_directive_enabled": True,
            "verified_phone_label_enabled": False,
            "highlights_tweets_tab_ui_enabled": True,
            "responsive_web_twitter_article_notes_tab_enabled": True,
            "creator_subscriptions_tweet_preview_api_enabled": True,
            "responsive_web_graphql_"
            "skip_user_profile_image_extensions_enabled": False,
            "responsive_web_graphql_timeline_navigation_enabled": True,
        }
        self.features_pagination = {
            "responsive_web_graphql_exclude_directive_enabled": True,
            "verified_phone_label_enabled": False,
            "creator_subscriptions_tweet_preview_api_enabled": True,
            "responsive_web_graphql_timeline_navigation_enabled": True,
            "responsive_web_graphql_skip_user_profile_"
            "image_extensions_enabled": False,
            "c9s_tweet_anatomy_moderator_badge_enabled": True,
            "tweetypie_unmention_optimization_enabled": True,
            "responsive_web_edit_tweet_api_enabled": True,
            "graphql_is_translatable_rweb_tweet_is_translatable_enabled": True,
            "view_counts_everywhere_api_enabled": True,
            "longform_notetweets_consumption_enabled": True,
            "responsive_web_twitter_article_tweet_consumption_enabled": True,
            "tweet_awards_web_tipping_enabled": False,
            "freedom_of_speech_not_reach_fetch_enabled": True,
            "standardized_nudges_misinfo": True,
            "tweet_with_visibility_results_prefer_gql_"
            "limited_actions_policy_enabled": True,
            "rweb_video_timestamps_enabled": True,
            "longform_notetweets_rich_text_read_enabled": True,
            "longform_notetweets_inline_media_enabled": True,
            "responsive_web_media_download_video_enabled": True,
            "responsive_web_enhance_cards_enabled": False,
        }

    def tweet_result_by_rest_id(self, tweet_id):
        endpoint = "/graphql/MWY3AO9_I3rcP_L2A4FR4A/TweetResultByRestId"
        variables = {
            "tweetId": tweet_id,
            "withCommunity": False,
            "includePromotedContent": False,
            "withVoice": False,
        }
        params = {
            "variables": self._json_dumps(variables),
            "features" : self._json_dumps(self.features_pagination),
        }
        tweet = self._call(endpoint, params)["data"]["tweetResult"]["result"]
        if "tweet" in tweet:
            tweet = tweet["tweet"]

        if tweet.get("__typename") == "TweetUnavailable":
            reason = tweet.get("reason")
            if reason == "NsfwLoggedOut":
                raise exception.AuthorizationError("NSFW Tweet")
            if reason == "Protected":
                raise exception.AuthorizationError("Protected Tweet")
            raise exception.StopExtraction("Tweet unavailable ('%s')", reason)

        return tweet

    def tweet_detail(self, tweet_id):
        endpoint = "/graphql/B9_KmbkLhXt6jRwGjJrweg/TweetDetail"
        variables = {
            "focalTweetId": tweet_id,
            "referrer": "profile",
            "with_rux_injections": False,
            "includePromotedContent": False,
            "withCommunity": True,
            "withQuickPromoteEligibilityTweetFields": True,
            "withBirdwatchNotes": True,
            "withVoice": True,
            "withV2Timeline": True,
        }
        return self._pagination_tweets(
            endpoint, variables, ("threaded_conversation_with_injections_v2",))

    def user_tweets(self, screen_name):
        endpoint = "/graphql/5ICa5d9-AitXZrIA3H-4MQ/UserTweets"
        variables = {
            "userId": self._user_id_by_screen_name(screen_name),
            "count": 100,
            "includePromotedContent": False,
            "withQuickPromoteEligibilityTweetFields": True,
            "withVoice": True,
            "withV2Timeline": True,
        }
        return self._pagination_tweets(endpoint, variables)

    def user_tweets_and_replies(self, screen_name):
        endpoint = "/graphql/UtLStR_BnYUGD7Q453UXQg/UserTweetsAndReplies"
        variables = {
            "userId": self._user_id_by_screen_name(screen_name),
            "count": 100,
            "includePromotedContent": False,
            "withCommunity": True,
            "withVoice": True,
            "withV2Timeline": True,
        }
        return self._pagination_tweets(endpoint, variables)

    def user_media(self, screen_name):
        endpoint = "/graphql/tO4LMUYAZbR4T0SqQ85aAw/UserMedia"
        variables = {
            "userId": self._user_id_by_screen_name(screen_name),
            "count": 100,
            "includePromotedContent": False,
            "withClientEventToken": False,
            "withBirdwatchNotes": False,
            "withVoice": True,
            "withV2Timeline": True,
        }
        return self._pagination_tweets(endpoint, variables)

    def user_likes(self, screen_name):
        endpoint = "/graphql/9s8V6sUI8fZLDiN-REkAxA/Likes"
        variables = {
            "userId": self._user_id_by_screen_name(screen_name),
            "count": 100,
            "includePromotedContent": False,
            "withClientEventToken": False,
            "withBirdwatchNotes": False,
            "withVoice": True,
            "withV2Timeline": True,
        }
        return self._pagination_tweets(endpoint, variables)

    def user_bookmarks(self):
        endpoint = "/graphql/cQxQgX8MJYjWwC0dxpyfYg/Bookmarks"
        variables = {
            "count": 100,
            "includePromotedContent": False,
        }
        features = self.features_pagination.copy()
        features["graphql_timeline_v2_bookmark_timeline"] = True
        return self._pagination_tweets(
            endpoint, variables, ("bookmark_timeline_v2", "timeline"), False,
            features=features)

    def list_latest_tweets_timeline(self, list_id):
        endpoint = "/graphql/HjsWc-nwwHKYwHenbHm-tw/ListLatestTweetsTimeline"
        variables = {
            "listId": list_id,
            "count": 100,
        }
        return self._pagination_tweets(
            endpoint, variables, ("list", "tweets_timeline", "timeline"))

    def search_timeline(self, query):
        endpoint = "/graphql/fZK7JipRHWtiZsTodhsTfQ/SearchTimeline"
        variables = {
            "rawQuery": query,
            "count": 100,
            "querySource": "",
            "product": "Latest",
        }

        return self._pagination_tweets(
            endpoint, variables,
            ("search_by_raw_query", "search_timeline", "timeline"))

    def community_tweets_timeline(self, community_id):
        endpoint = "/graphql/7B2AdxSuC-Er8qUr3Plm_w/CommunityTweetsTimeline"
        variables = {
            "communityId": community_id,
            "count": 100,
            "displayLocation": "Community",
            "rankingMode": "Recency",
            "withCommunity": True,
        }
        return self._pagination_tweets(
            endpoint, variables,
            ("communityResults", "result", "ranked_community_timeline",
             "timeline"))

    def community_media_timeline(self, community_id):
        endpoint = "/graphql/qAGUldfcIoMv5KyAyVLYog/CommunityMediaTimeline"
        variables = {
            "communityId": community_id,
            "count": 100,
            "withCommunity": True,
        }
        return self._pagination_tweets(
            endpoint, variables,
            ("communityResults", "result", "community_media_timeline",
             "timeline"))

    def communities_main_page_timeline(self, screen_name):
        endpoint = ("/graphql/GtOhw2mstITBepTRppL6Uw"
                    "/CommunitiesMainPageTimeline")
        variables = {
            "count": 100,
            "withCommunity": True,
        }
        return self._pagination_tweets(
            endpoint, variables,
            ("viewer", "communities_timeline", "timeline"))

    def live_event_timeline(self, event_id):
        endpoint = "/2/live_event/timeline/{}.json".format(event_id)
        params = self.params.copy()
        params["timeline_id"] = "recap"
        params["urt"] = "true"
        params["get_annotations"] = "true"
        return self._pagination_legacy(endpoint, params)

    def live_event(self, event_id):
        endpoint = "/1.1/live_event/1/{}/timeline.json".format(event_id)
        params = self.params.copy()
        params["count"] = "0"
        params["urt"] = "true"
        return (self._call(endpoint, params)
                ["twitter_objects"]["live_events"][event_id])
|
2020-06-03 20:51:29 +02:00
|
|
|
|
2021-02-22 18:18:33 +01:00
|
|
|
def list_members(self, list_id):
|
2024-02-12 23:17:19 +01:00
|
|
|
endpoint = "/graphql/BQp2IEYkgxuSxqbTAr1e1g/ListMembers"
|
2021-02-22 18:18:33 +01:00
|
|
|
variables = {
|
|
|
|
"listId": list_id,
|
2022-01-21 23:34:41 +01:00
|
|
|
"count": 100,
|
|
|
|
"withSafetyModeUserFields": True,
|
2021-02-22 18:18:33 +01:00
|
|
|
}
|
2022-01-21 23:34:41 +01:00
|
|
|
return self._pagination_users(
|
|
|
|
endpoint, variables, ("list", "members_timeline", "timeline"))

    def user_following(self, screen_name):
        endpoint = "/graphql/PAnE9toEjRfE-4tozRcsfw/Following"
        variables = {
            "userId": self._user_id_by_screen_name(screen_name),
            "count": 100,
            "includePromotedContent": False,
        }
        return self._pagination_users(endpoint, variables)

    @memcache(keyarg=1)
    def user_by_rest_id(self, rest_id):
        endpoint = "/graphql/tD8zKvQzwY3kdx5yz6YmOw/UserByRestId"
        features = self.features
        params = {
            "variables": self._json_dumps({
                "userId": rest_id,
                "withSafetyModeUserFields": True,
            }),
            "features": self._json_dumps(features),
        }
        return self._call(endpoint, params)["data"]["user"]["result"]

    @memcache(keyarg=1)
    def user_by_screen_name(self, screen_name):
        endpoint = "/graphql/k5XapwcSikNsEsILW5FvgA/UserByScreenName"
        features = self.features.copy()
        features["subscriptions_verification_info_"
                 "is_identity_verified_enabled"] = True
        features["subscriptions_verification_info_"
                 "verified_since_enabled"] = True
        params = {
            "variables": self._json_dumps({
                "screen_name": screen_name,
                "withSafetyModeUserFields": True,
            }),
            "features": self._json_dumps(features),
        }
        return self._call(endpoint, params)["data"]["user"]["result"]
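
    # Both user lookups are wrapped in @memcache(keyarg=1), so repeated
    # calls with the same screen name or rest_id hit the network only once
    # per process. Hypothetical illustration:
    #
    #   user = api.user_by_screen_name("example")  # API request
    #   user = api.user_by_screen_name("example")  # served from cache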

    def _user_id_by_screen_name(self, screen_name):
        user = ()
        try:
            if screen_name.startswith("id:"):
                user = self.user_by_rest_id(screen_name[3:])
            else:
                user = self.user_by_screen_name(screen_name)
            self.extractor._assign_user(user)
            return user["rest_id"]
        except KeyError:
            if "unavailable_message" in user:
                raise exception.NotFoundError("{} ({})".format(
                    user["unavailable_message"].get("text"),
                    user.get("reason")), False)
            else:
                raise exception.NotFoundError("user")
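
    # Dispatch sketch for the method above ('api' is hypothetical): an
    # "id:"-prefixed name resolves via rest_id, anything else via
    # screen name.
    #
    #   api._user_id_by_screen_name("id:12")    # -> user_by_rest_id("12")
    #   api._user_id_by_screen_name("example")  # -> user_by_screen_name(...)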

    @cache(maxage=3600)
    def _guest_token(self):
        endpoint = "/1.1/guest/activate.json"
        self.log.info("Requesting guest token")
        return str(self._call(
            endpoint, None, "POST", False, "https://api.twitter.com",
        )["guest_token"])

    def _authenticate_guest(self):
        guest_token = self._guest_token()
        if guest_token != self.headers["x-guest-token"]:
            self.headers["x-guest-token"] = guest_token
            self.extractor.cookies.set(
                "gt", guest_token, domain=self.extractor.cookies_domain)

    def _call(self, endpoint, params, method="GET", auth=True, root=None):
        url = (root or self.root) + endpoint

        while True:
            if not self.headers["x-twitter-auth-type"] and auth:
                self._authenticate_guest()

            response = self.extractor.request(
                url, method=method, params=params,
                headers=self.headers, fatal=None)

            # update 'x-csrf-token' header (#1170)
            csrf_token = response.cookies.get("ct0")
            if csrf_token:
                self.headers["x-csrf-token"] = csrf_token

            if response.status_code < 400:
                data = response.json()

                errors = data.get("errors")
                if not errors:
                    return data

                retry = False
                for error in errors:
                    msg = error.get("message") or "Unspecified"
                    self.log.debug("API error: '%s'", msg)

                    if "this account is temporarily locked" in msg:
                        msg = "Account temporarily locked"
                        if self.extractor.config("locked") != "wait":
                            raise exception.AuthorizationError(msg)
                        self.log.warning("%s. Press ENTER to retry.", msg)
                        try:
                            input()
                        except (EOFError, OSError):
                            pass
                        retry = True

                    elif msg.lower().startswith("timeout"):
                        retry = True

                if not retry:
                    return data
                elif self.headers["x-twitter-auth-type"]:
                    self.log.debug("Retrying API request")
                    continue

                # fall through to "Login Required"
                response.status_code = 404

            if response.status_code == 429:
                # rate limit exceeded
                if self.extractor.config("ratelimit") == "abort":
                    raise exception.StopExtraction("Rate limit exceeded")

                until = response.headers.get("x-rate-limit-reset")
                seconds = None if until else 60
                self.extractor.wait(until=until, seconds=seconds)
                continue

            if response.status_code in (403, 404) and \
                    not self.headers["x-twitter-auth-type"]:
                raise exception.AuthorizationError("Login required")

            # error
            try:
                data = response.json()
                errors = ", ".join(e["message"] for e in data["errors"])
            except ValueError:
                errors = response.text
            except Exception:
                errors = data.get("errors", "")

            raise exception.StopExtraction(
                "%s %s (%s)", response.status_code, response.reason, errors)

    def _pagination_legacy(self, endpoint, params):
        original_retweets = (self.extractor.retweets == "original")
        bottom = ("cursor-bottom-", "sq-cursor-bottom")

        while True:
            data = self._call(endpoint, params)

            instructions = data["timeline"]["instructions"]
            if not instructions:
                return

            tweets = data["globalObjects"]["tweets"]
            users = data["globalObjects"]["users"]
            tweet_id = cursor = None
            tweet_ids = []
            entries = ()

            # process instructions
            for instr in instructions:
                if "addEntries" in instr:
                    entries = instr["addEntries"]["entries"]
                elif "replaceEntry" in instr:
                    entry = instr["replaceEntry"]["entry"]
                    if entry["entryId"].startswith(bottom):
                        cursor = (entry["content"]["operation"]
                                  ["cursor"]["value"])

            # collect tweet IDs and cursor value
            for entry in entries:
                entry_startswith = entry["entryId"].startswith

                if entry_startswith(("tweet-", "sq-I-t-")):
                    tweet_ids.append(
                        entry["content"]["item"]["content"]["tweet"]["id"])

                elif entry_startswith("homeConversation-"):
                    tweet_ids.extend(
                        entry["content"]["timelineModule"]["metadata"]
                        ["conversationMetadata"]["allTweetIds"][::-1])

                elif entry_startswith(bottom):
                    cursor = entry["content"]["operation"]["cursor"]
                    if not cursor.get("stopOnEmptyResponse", True):
                        # keep going even if there are no tweets
                        tweet_id = True
                    cursor = cursor["value"]

                elif entry_startswith("conversationThread-"):
                    tweet_ids.extend(
                        item["entryId"][6:]
                        for item in entry["content"]["timelineModule"]["items"]
                        if item["entryId"].startswith("tweet-")
                    )

            # process tweets
            for tweet_id in tweet_ids:
                try:
                    tweet = tweets[tweet_id]
                except KeyError:
                    self.log.debug("Skipping %s (deleted)", tweet_id)
                    continue

                if "retweeted_status_id_str" in tweet:
                    retweet = tweets.get(tweet["retweeted_status_id_str"])
                    if original_retweets:
                        if not retweet:
                            continue
                        retweet["retweeted_status_id_str"] = retweet["id_str"]
                        retweet["_retweet_id_str"] = tweet["id_str"]
                        tweet = retweet
                    elif retweet:
                        tweet["author"] = users[retweet["user_id_str"]]
                        if "extended_entities" in retweet and \
                                "extended_entities" not in tweet:
                            tweet["extended_entities"] = \
                                retweet["extended_entities"]
                tweet["user"] = users[tweet["user_id_str"]]
                yield tweet

                if "quoted_status_id_str" in tweet:
                    quoted = tweets.get(tweet["quoted_status_id_str"])
                    if quoted:
                        quoted = quoted.copy()
                        quoted["author"] = users[quoted["user_id_str"]]
                        quoted["quoted_by"] = tweet["user"]["screen_name"]
                        quoted["quoted_by_id_str"] = tweet["id_str"]
                        yield quoted

            # stop on empty response
            if not cursor or (not tweets and not tweet_id):
                return
            params["cursor"] = cursor

    def _pagination_tweets(self, endpoint, variables,
                           path=None, stop_tweets=True, features=None):
        extr = self.extractor
        original_retweets = (extr.retweets == "original")
        pinned_tweet = extr.pinned

        params = {"variables": None}
        if features is None:
            features = self.features_pagination
        if features:
            params["features"] = self._json_dumps(features)

        while True:
            params["variables"] = self._json_dumps(variables)
            data = self._call(endpoint, params)["data"]

            try:
                if path is None:
                    instructions = (data["user"]["result"]["timeline_v2"]
                                    ["timeline"]["instructions"])
                else:
                    instructions = data
                    for key in path:
                        instructions = instructions[key]
                    instructions = instructions["instructions"]

                cursor = None
                entries = None
                for instr in instructions:
                    instr_type = instr.get("type")
                    if instr_type == "TimelineAddEntries":
                        if entries:
                            entries.extend(instr["entries"])
                        else:
                            entries = instr["entries"]
                    elif instr_type == "TimelineAddToModule":
                        entries = instr["moduleItems"]
                    elif instr_type == "TimelineReplaceEntry":
                        entry = instr["entry"]
                        if entry["entryId"].startswith("cursor-bottom-"):
                            cursor = entry["content"]["value"]
                if entries is None:
                    if not cursor:
                        return
                    entries = ()

            except LookupError:
                extr.log.debug(data)

                user = extr._user_obj
                if user:
                    user = user["legacy"]
                    if user.get("blocked_by"):
                        if self.headers["x-twitter-auth-type"] and \
                                extr.config("logout"):
                            extr.cookies_file = None
                            del extr.cookies["auth_token"]
                            self.headers["x-twitter-auth-type"] = None
                            extr.log.info("Retrying API request as guest")
                            continue
                        raise exception.AuthorizationError(
                            "{} blocked your account".format(
                                user["screen_name"]))
                    elif user.get("protected"):
                        raise exception.AuthorizationError(
                            "{}'s Tweets are protected".format(
                                user["screen_name"]))

                raise exception.StopExtraction(
                    "Unable to retrieve Tweets from this timeline")

            tweets = []
            tweet = None

            if pinned_tweet:
                pinned_tweet = False
                if instructions[-1]["type"] == "TimelinePinEntry":
                    tweets.append(instructions[-1]["entry"])

            for entry in entries:
                esw = entry["entryId"].startswith

                if esw("tweet-"):
                    tweets.append(entry)
                elif esw(("profile-grid-",
                          "communities-grid-")):
                    if "content" in entry:
                        tweets.extend(entry["content"]["items"])
                    else:
                        tweets.append(entry)
                elif esw(("homeConversation-",
                          "profile-conversation-",
                          "conversationthread-")):
                    tweets.extend(entry["content"]["items"])
                elif esw("tombstone-"):
                    item = entry["content"]["itemContent"]
                    item["tweet_results"] = \
                        {"result": {"tombstone": item["tombstoneInfo"]}}
                    tweets.append(entry)
                elif esw("cursor-bottom-"):
                    cursor = entry["content"]
                    if "itemContent" in cursor:
                        cursor = cursor["itemContent"]
                    if not cursor.get("stopOnEmptyResponse", True):
                        # keep going even if there are no tweets
                        tweet = True
                    cursor = cursor.get("value")

            for entry in tweets:
                try:
                    item = ((entry.get("content") or entry["item"])
                            ["itemContent"])
                    if "promotedMetadata" in item and not extr.ads:
                        extr.log.debug(
                            "Skipping %s (ad)",
                            (entry.get("entryId") or "").rpartition("-")[2])
                        continue

                    tweet = item["tweet_results"]["result"]
                    if "tombstone" in tweet:
                        tweet = self._process_tombstone(
                            entry, tweet["tombstone"])
                        if not tweet:
                            continue

                    if "tweet" in tweet:
                        tweet = tweet["tweet"]
                    legacy = tweet["legacy"]
                    tweet["sortIndex"] = entry.get("sortIndex")
                except KeyError:
                    extr.log.debug(
                        "Skipping %s (deleted)",
                        (entry.get("entryId") or "").rpartition("-")[2])
                    continue

                if "retweeted_status_result" in legacy:
                    retweet = legacy["retweeted_status_result"]["result"]
                    if "tweet" in retweet:
                        retweet = retweet["tweet"]
                    if original_retweets:
                        try:
                            retweet["legacy"]["retweeted_status_id_str"] = \
                                retweet["rest_id"]
                            retweet["_retweet_id_str"] = tweet["rest_id"]
                            tweet = retweet
                        except KeyError:
                            continue
                    else:
                        try:
                            legacy["retweeted_status_id_str"] = \
                                retweet["rest_id"]
                            tweet["author"] = \
                                retweet["core"]["user_results"]["result"]

                            rtlegacy = retweet["legacy"]

                            if "note_tweet" in retweet:
                                tweet["note_tweet"] = retweet["note_tweet"]

                            if "extended_entities" in rtlegacy and \
                                    "extended_entities" not in legacy:
                                legacy["extended_entities"] = \
                                    rtlegacy["extended_entities"]

                            if "withheld_scope" in rtlegacy and \
                                    "withheld_scope" not in legacy:
                                legacy["withheld_scope"] = \
                                    rtlegacy["withheld_scope"]

                            legacy["full_text"] = rtlegacy["full_text"]
                        except KeyError:
                            pass

                yield tweet

                if "quoted_status_result" in tweet:
                    try:
                        quoted = tweet["quoted_status_result"]["result"]
                        quoted["legacy"]["quoted_by"] = (
                            tweet["core"]["user_results"]["result"]
                            ["legacy"]["screen_name"])
                        quoted["legacy"]["quoted_by_id_str"] = tweet["rest_id"]
                        quoted["sortIndex"] = entry.get("sortIndex")

                        yield quoted
                    except KeyError:
                        extr.log.debug(
                            "Skipping quote of %s (deleted)",
                            tweet.get("rest_id"))
                        continue

            if stop_tweets and not tweet:
                return
            if not cursor or cursor == variables.get("cursor"):
                return
            variables["cursor"] = cursor

    def _pagination_users(self, endpoint, variables, path=None):
        params = {
            "variables": None,
            "features" : self._json_dumps(self.features_pagination),
        }

        while True:
            cursor = entry = None
            params["variables"] = self._json_dumps(variables)
            data = self._call(endpoint, params)["data"]

            try:
                if path is None:
                    instructions = (data["user"]["result"]["timeline"]
                                    ["timeline"]["instructions"])
                else:
                    for key in path:
                        data = data[key]
                    instructions = data["instructions"]
            except KeyError:
                return

            for instr in instructions:
                if instr["type"] == "TimelineAddEntries":
                    for entry in instr["entries"]:
                        if entry["entryId"].startswith("user-"):
                            try:
                                user = (entry["content"]["itemContent"]
                                        ["user_results"]["result"])
                            except KeyError:
                                pass
                            else:
                                if "rest_id" in user:
                                    yield user
                        elif entry["entryId"].startswith("cursor-bottom-"):
                            cursor = entry["content"]["value"]

            if not cursor or cursor.startswith(("-1|", "0|")) or not entry:
                return
            variables["cursor"] = cursor

    def _process_tombstone(self, entry, tombstone):
        text = (tombstone.get("richText") or tombstone["text"])["text"]
        tweet_id = entry["entryId"].rpartition("-")[2]

        if text.startswith("Age-restricted"):
            if self._nsfw_warning:
                self._nsfw_warning = False
                self.log.warning('"%s"', text)

        self.log.debug("Skipping %s ('%s')", tweet_id, text)


@cache(maxage=365*86400, keyarg=1)
def _login_impl(extr, username, password):

    import re
    import random

    if re.fullmatch(r"[\w.%+-]+@[\w.-]+\.\w{2,}", username):
        extr.log.warning(
            "Login with email is no longer possible. "
            "You need to provide your username or phone number instead.")

    def process(response):
        try:
            data = response.json()
        except ValueError:
            data = {"errors": ({"message": "Invalid response"},)}
        else:
            if response.status_code < 400:
                return data["flow_token"]

        errors = []
        for error in data.get("errors") or ():
            msg = error.get("message")
            errors.append('"{}"'.format(msg) if msg else "Unknown error")
        extr.log.debug(response.text)
        raise exception.AuthenticationError(", ".join(errors))

    extr.cookies.clear()
    api = TwitterAPI(extr)
    api._authenticate_guest()
    headers = api.headers

    extr.log.info("Logging in as %s", username)

    # init
    data = {
        "input_flow_data": {
            "flow_context": {
                "debug_overrides": {},
                "start_location": {"location": "unknown"},
            },
        },
        "subtask_versions": {
            "action_list": 2,
            "alert_dialog": 1,
            "app_download_cta": 1,
            "check_logged_in_account": 1,
            "choice_selection": 3,
            "contacts_live_sync_permission_prompt": 0,
            "cta": 7,
            "email_verification": 2,
            "end_flow": 1,
            "enter_date": 1,
            "enter_email": 2,
            "enter_password": 5,
            "enter_phone": 2,
            "enter_recaptcha": 1,
            "enter_text": 5,
            "enter_username": 2,
            "generic_urt": 3,
            "in_app_notification": 1,
            "interest_picker": 3,
            "js_instrumentation": 1,
            "menu_dialog": 1,
            "notifications_permission_prompt": 2,
            "open_account": 2,
            "open_home_timeline": 1,
            "open_link": 1,
            "phone_verification": 4,
            "privacy_options": 1,
            "security_key": 3,
            "select_avatar": 4,
            "select_banner": 2,
            "settings_list": 7,
            "show_code": 1,
            "sign_up": 2,
            "sign_up_review": 4,
            "tweet_selection_urt": 1,
            "update_users": 1,
            "upload_media": 1,
            "user_recommendations_list": 4,
            "user_recommendations_urt": 1,
            "wait_spinner": 3,
            "web_modal": 1,
        },
    }
    url = "https://api.twitter.com/1.1/onboarding/task.json?flow_name=login"
    response = extr.request(url, method="POST", headers=headers, json=data)

    data = {
        "flow_token": process(response),
        "subtask_inputs": [
            {
                "subtask_id": "LoginJsInstrumentationSubtask",
                "js_instrumentation": {
                    "response": "{}",
                    "link": "next_link",
                },
            },
        ],
    }
    url = "https://api.twitter.com/1.1/onboarding/task.json"
    response = extr.request(
        url, method="POST", headers=headers, json=data, fatal=None)

    # username
    data = {
        "flow_token": process(response),
        "subtask_inputs": [
            {
                "subtask_id": "LoginEnterUserIdentifierSSO",
                "settings_list": {
                    "setting_responses": [
                        {
                            "key": "user_identifier",
                            "response_data": {
                                "text_data": {"result": username},
                            },
                        },
                    ],
                    "link": "next_link",
                },
            },
        ],
    }
    # url = "https://api.twitter.com/1.1/onboarding/task.json"
    extr.sleep(random.uniform(2.0, 4.0), "login (username)")
    response = extr.request(
        url, method="POST", headers=headers, json=data, fatal=None)

    # password
    data = {
        "flow_token": process(response),
        "subtask_inputs": [
            {
                "subtask_id": "LoginEnterPassword",
                "enter_password": {
                    "password": password,
                    "link": "next_link",
                },
            },
        ],
    }
    # url = "https://api.twitter.com/1.1/onboarding/task.json"
    extr.sleep(random.uniform(2.0, 4.0), "login (password)")
    response = extr.request(
        url, method="POST", headers=headers, json=data, fatal=None)

    # account duplication check ?
    data = {
        "flow_token": process(response),
        "subtask_inputs": [
            {
                "subtask_id": "AccountDuplicationCheck",
                "check_logged_in_account": {
                    "link": "AccountDuplicationCheck_false",
                },
            },
        ],
    }
    # url = "https://api.twitter.com/1.1/onboarding/task.json"
    response = extr.request(
        url, method="POST", headers=headers, json=data, fatal=None)
    process(response)

    return {
        cookie.name: cookie.value
        for cookie in extr.cookies
    }
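
# The login flow above drives Twitter's onboarding task machine: init ->
# JS instrumentation -> username -> password -> duplication check, each
# step exchanging the previous step's 'flow_token'. The resulting cookie
# dict is cached for a year by @cache(maxage=365*86400), so the flow only
# reruns after expiry or when the stored cookies are cleared.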