# -*- coding: utf-8 -*-

# Copyright 2016-2020 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extractors for https://twitter.com/"""
|
2016-10-06 19:12:07 +02:00
|
|
|
|
|
|
|
|
|
from .common import Extractor, Message
|
2019-04-07 23:06:57 +02:00
|
|
|
|
from .. import text, exception
|
2020-06-10 20:58:42 +02:00
|
|
|
|
from ..cache import cache
|
2020-06-03 20:51:29 +02:00
|
|
|
|
import hashlib
|
|
|
|
|
import time
|
2016-10-06 19:12:07 +02:00
|
|
|
|
|
2017-02-01 00:53:19 +01:00
|
|
|
|
|
2020-07-13 23:48:42 +02:00
|
|
|
|
# Common URL prefix shared by all twitter extractor patterns:
# optional scheme, optional "www."/"mobile." subdomain, and either
# twitter.com itself or the nitter.net mirror frontend
BASE_PATTERN = (
    r"(?:https?://)?(?:www\.|mobile\.)?"
    r"(?:twitter\.com|nitter\.net)"
)
|
|
|
|
|
|
|
|
|
|
|
2018-08-17 20:04:11 +02:00
|
|
|
|
class TwitterExtractor(Extractor):
    """Base class for twitter extractors"""
    category = "twitter"
    directory_fmt = ("{category}", "{user[name]}")
    filename_fmt = "{tweet_id}_{num}.{extension}"
    archive_fmt = "{tweet_id}_{retweet_id}_{num}"
    cookiedomain = ".twitter.com"
    root = "https://twitter.com"
    # size suffixes appended to photo URLs, ordered best to worst quality
    sizes = (":orig", ":large", ":medium", ":small")

    def __init__(self, match):
        Extractor.__init__(self, match)
        self.user = match.group(1)
        # user-configurable filter and feature options
        self.retweets = self.config("retweets", True)
        self.replies = self.config("replies", True)
        self.twitpic = self.config("twitpic", False)
        self.quoted = self.config("quoted", True)
        self.videos = self.config("videos", True)
        # cache of transformed user dicts, keyed by user ID string
        self._user_cache = {}

    def items(self):
        """Yield Message tuples for every media file of every selected tweet"""
        self.login()
        metadata = self.metadata()
        yield Message.Version, 1

        for tweet in self.tweets():

            # apply the 'retweets', 'replies', and 'quoted' options
            if not self.retweets and "retweeted_status_id_str" in tweet:
                self.log.debug("Skipping %s (retweet)", tweet["id_str"])
                continue
            if not self.replies and "in_reply_to_user_id_str" in tweet:
                self.log.debug("Skipping %s (reply)", tweet["id_str"])
                continue
            if not self.quoted and "quoted" in tweet:
                self.log.debug("Skipping %s (quoted tweet)", tweet["id_str"])
                continue

            # optionally merge TwitPic embeds into 'extended_entities'
            if self.twitpic:
                self._extract_twitpic(tweet)
            # tweets without 'extended_entities' carry no media
            if "extended_entities" not in tweet:
                continue

            tdata = self._transform_tweet(tweet)
            tdata.update(metadata)

            yield Message.Directory, tdata
            for tdata["num"], media in enumerate(
                    tweet["extended_entities"]["media"], 1):

                tdata["width"] = media["original_info"].get("width", 0)
                tdata["height"] = media["original_info"].get("height", 0)

                if "video_info" in media:

                    if self.videos == "ytdl":
                        # let youtube-dl handle the whole tweet URL
                        url = "ytdl:{}/i/web/status/{}".format(
                            self.root, tweet["id_str"])
                        tdata["extension"] = None
                        yield Message.Url, url, tdata

                    elif self.videos:
                        # pick the variant with the highest bitrate
                        video_info = media["video_info"]
                        variant = max(
                            video_info["variants"],
                            key=lambda v: v.get("bitrate", 0),
                        )
                        tdata["duration"] = video_info.get(
                            "duration_millis", 0) / 1000
                        tdata["bitrate"] = variant.get("bitrate", 0)

                        url = variant["url"]
                        text.nameext_from_url(url, tdata)
                        yield Message.Url, url, tdata

                elif "media_url_https" in media:
                    # photo: offer all size variants as fallbacks
                    url = media["media_url_https"]
                    urls = [url + size for size in self.sizes]
                    text.nameext_from_url(url, tdata)
                    yield Message.Urllist, urls, tdata

                else:
                    # TwitPic entry injected by _extract_twitpic()
                    url = media["media_url"]
                    text.nameext_from_url(url, tdata)
                    yield Message.Url, url, tdata

    def _extract_twitpic(self, tweet):
        """Resolve twitpic.com links in 'tweet' and append them as media"""
        twitpics = []
        for url in tweet["entities"].get("urls", ()):
            url = url["expanded_url"]
            if "//twitpic.com/" in url and "/photos/" not in url:
                # fetch the TwitPic page and extract its image URL
                response = self.request(url, fatal=False)
                if response.status_code >= 400:
                    continue
                url = text.extract(
                    response.text, 'name="twitter:image" value="', '"')[0]
                if url:
                    twitpics.append({
                        "original_info": {},
                        "media_url"    : url,
                    })
        if twitpics:
            if "extended_entities" in tweet:
                tweet["extended_entities"]["media"].extend(twitpics)
            else:
                tweet["extended_entities"] = {"media": twitpics}

    def _transform_tweet(self, tweet):
        """Build a metadata dict from a raw 'tweet' API object"""
        entities = tweet["entities"]
        tdata = {
            "tweet_id"      : text.parse_int(tweet["id_str"]),
            "retweet_id"    : text.parse_int(
                tweet.get("retweeted_status_id_str")),
            "quote_id"      : text.parse_int(
                tweet.get("quoted_status_id_str")),
            "reply_id"      : text.parse_int(
                tweet.get("in_reply_to_status_id_str")),
            "date"          : text.parse_datetime(
                tweet["created_at"], "%a %b %d %H:%M:%S %z %Y"),
            "user"          : self._transform_user(tweet["user"]),
            "lang"          : tweet["lang"],
            "content"       : tweet["full_text"],
            "favorite_count": tweet["favorite_count"],
            "quote_count"   : tweet["quote_count"],
            "reply_count"   : tweet["reply_count"],
            "retweet_count" : tweet["retweet_count"],
        }

        hashtags = entities.get("hashtags")
        if hashtags:
            tdata["hashtags"] = [t["text"] for t in hashtags]

        mentions = entities.get("user_mentions")
        if mentions:
            tdata["mentions"] = [{
                "id": text.parse_int(u["id_str"]),
                "name": u["screen_name"],
                "nick": u["name"],
            } for u in mentions]

        if "in_reply_to_screen_name" in tweet:
            tdata["reply_to"] = tweet["in_reply_to_screen_name"]

        # 'author' is the original tweet's author for retweets/quotes;
        # fall back to the tweet's own user otherwise
        if "author" in tweet:
            tdata["author"] = self._transform_user(tweet["author"])
        else:
            tdata["author"] = tdata["user"]

        return tdata

    def _transform_user(self, user):
        """Build (and cache) a metadata dict from a raw 'user' API object"""
        uid = user["id_str"]
        cache = self._user_cache

        if uid not in cache:
            cache[uid] = {
                "id"              : text.parse_int(uid),
                "name"            : user["screen_name"],
                "nick"            : user["name"],
                "description"     : user["description"],
                "location"        : user["location"],
                "date"            : text.parse_datetime(
                    user["created_at"], "%a %b %d %H:%M:%S %z %Y"),
                "verified"        : user.get("verified", False),
                "profile_banner"  : user.get("profile_banner_url", ""),
                # strip the '_normal' suffix to get the full-size avatar
                "profile_image"   : user.get(
                    "profile_image_url_https", "").replace("_normal.", "."),
                "favourites_count": user["favourites_count"],
                "followers_count" : user["followers_count"],
                "friends_count"   : user["friends_count"],
                "listed_count"    : user["listed_count"],
                "media_count"     : user["media_count"],
                "statuses_count"  : user["statuses_count"],
            }
        return cache[uid]

    def metadata(self):
        """Return general metadata"""
        return {}

    def tweets(self):
        """Yield all relevant tweet objects"""

    def login(self):
        """Log in with the configured username/password, if any"""
        username, password = self._get_auth_info()
        if username:
            self._update_cookies(self._login_impl(username, password))

    @cache(maxage=360*24*3600, keyarg=1)
    def _login_impl(self, username, password):
        """Perform the actual login and return the session cookies"""
        self.log.info("Logging in as %s", username)

        # fetch the login form to obtain an authenticity token
        url = "https://mobile.twitter.com/i/nojs_router"
        params = {"path": "/login"}
        headers = {"Referer": self.root + "/", "Origin": self.root}
        page = self.request(
            url, method="POST", params=params, headers=headers, data={}).text

        pos = page.index('name="authenticity_token"')
        token = text.extract(page, 'value="', '"', pos)[0]

        # submit the login form
        url = "https://mobile.twitter.com/sessions"
        data = {
            "authenticity_token"        : token,
            "session[username_or_email]": username,
            "session[password]"         : password,
            "remember_me"               : "1",
            "wfa"                       : "1",
            "commit"                    : "+Log+in+",
            "ui_metrics"                : "",
        }
        response = self.request(url, method="POST", data=data)
        cookies = {
            cookie.name: cookie.value
            for cookie in self.session.cookies
            if cookie.domain == self.cookiedomain
        }

        # a redirect to /error or a missing auth token means failure
        if "/error" in response.url or "auth_token" not in cookies:
            raise exception.AuthenticationError()
        return cookies
|
2018-08-17 20:04:11 +02:00
|
|
|
|
|
|
|
|
|
|
2018-08-19 20:36:33 +02:00
|
|
|
|
class TwitterTimelineExtractor(TwitterExtractor):
    """Extractor for all images from a user's timeline"""
    subcategory = "timeline"
    # matches either a plain screen name or an intent/user URL with a user ID
    pattern = BASE_PATTERN + \
        r"/(?!search)(?:([^/?&#]+)/?(?:$|[?#])|intent/user\?user_id=(\d+))"
    test = (
        ("https://twitter.com/supernaturepics", {
            "range": "1-40",
            "url": "0106229d408f4111d9a52c8fd2ad687f64842aa4",
        }),
        ("https://mobile.twitter.com/supernaturepics?p=i"),
        ("https://www.twitter.com/id:2976459548"),
        ("https://twitter.com/intent/user?user_id=2976459548"),
    )

    def __init__(self, match):
        TwitterExtractor.__init__(self, match)
        # group 2 is set for intent/user URLs; store as "id:<uid>"
        uid = match.group(2)
        if uid:
            self.user = "id:" + uid

    def tweets(self):
        return TwitterAPI(self).timeline_profile(self.user)
|
2018-08-19 20:36:33 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TwitterMediaExtractor(TwitterExtractor):
    """Extractor for all images from a user's Media Tweets"""
    subcategory = "media"
    pattern = BASE_PATTERN + r"/(?!search)([^/?&#]+)/media(?!\w)"
    test = (
        ("https://twitter.com/supernaturepics/media", {
            "range": "1-40",
            "url": "0106229d408f4111d9a52c8fd2ad687f64842aa4",
        }),
        ("https://mobile.twitter.com/supernaturepics/media#t"),
        ("https://www.twitter.com/id:2976459548/media"),
    )

    def tweets(self):
        return TwitterAPI(self).timeline_media(self.user)
|
2018-08-19 20:36:33 +02:00
|
|
|
|
|
2019-10-17 18:34:07 +02:00
|
|
|
|
|
2020-06-16 14:27:22 +02:00
|
|
|
|
class TwitterLikesExtractor(TwitterExtractor):
    """Extractor for a user's liked tweets"""
    subcategory = "likes"
    pattern = BASE_PATTERN + r"/(?!search)([^/?&#]+)/likes(?!\w)"
    test = ("https://twitter.com/supernaturepics/likes",)

    def tweets(self):
        # fetch this user's 'favorites' (likes) timeline
        api = TwitterAPI(self)
        return api.timeline_favorites(self.user)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TwitterBookmarkExtractor(TwitterExtractor):
    """Extractor for the logged-in user's bookmarked tweets"""
    subcategory = "bookmark"
    pattern = BASE_PATTERN + r"/i/bookmarks()"
    test = ("https://twitter.com/i/bookmarks",)

    def tweets(self):
        # bookmarks belong to the authenticated account; no user argument
        api = TwitterAPI(self)
        return api.timeline_bookmark()
|
|
|
|
|
|
|
|
|
|
|
2019-10-16 18:23:10 +02:00
|
|
|
|
class TwitterSearchExtractor(TwitterExtractor):
    """Extractor for all images from a search timeline"""
    subcategory = "search"
    directory_fmt = ("{category}", "Search", "{search}")
    # the URL-encoded query ends up in match.group(1) -> self.user
    pattern = BASE_PATTERN + r"/search/?\?(?:[^&#]+&)*q=([^&#]+)"
    test = ("https://twitter.com/search?q=nature", {
        "range": "1-40",
        "count": 40,
    })

    def metadata(self):
        return {"search": text.unquote(self.user)}

    def tweets(self):
        return TwitterAPI(self).search(text.unquote(self.user))
|
2019-10-17 18:34:07 +02:00
|
|
|
|
|
2018-08-19 20:36:33 +02:00
|
|
|
|
|
2018-08-17 20:04:11 +02:00
|
|
|
|
class TwitterTweetExtractor(TwitterExtractor):
    """Extractor for images from individual tweets"""
    subcategory = "tweet"
    pattern = BASE_PATTERN + r"/([^/?&#]+|i/web)/status/(\d+)"
    test = (
        ("https://twitter.com/supernaturepics/status/604341487988576256", {
            "url": "0e801d2f98142dd87c3630ded9e4be4a4d63b580",
            "content": "ab05e1d8d21f8d43496df284d31e8b362cd3bcab",
        }),
        # 4 images
        ("https://twitter.com/perrypumas/status/894001459754180609", {
            "url": "c8a262a9698cb733fb27870f5a8f75faf77d79f6",
        }),
        # video
        ("https://twitter.com/perrypumas/status/1065692031626829824", {
            "pattern": r"https://video.twimg.com/ext_tw_video/.+\.mp4\?tag=5",
        }),
        # content with emoji, newlines, hashtags (#338)
        ("https://twitter.com/playpokemon/status/1263832915173048321", {
            "keyword": {"content": (
                r"re:Gear up for #PokemonSwordShieldEX with special Mystery "
                "Gifts! \n\nYou’ll be able to receive four Galarian form "
                "Pokémon with Hidden Abilities, plus some very useful items. "
                "It’s our \\(Mystery\\) Gift to you, Trainers! \n\n❓🎁➡️ "
            )},
        }),
        # Reply to deleted tweet (#403, #838)
        ("https://twitter.com/i/web/status/1170041925560258560", {
            "pattern": r"https://pbs.twimg.com/media/EDzS7VrU0AAFL4_.jpg:orig",
        }),
        # 'replies' option (#705)
        ("https://twitter.com/i/web/status/1170041925560258560", {
            "options": (("replies", False),),
            "count": 0,
        }),
        # quoted tweet (#526, #854)
        ("https://twitter.com/StobiesGalaxy/status/1270755918330896395", {
            "pattern": r"https://pbs\.twimg\.com/media/Ea[KG].+\.jpg",
            "count": 8,
        }),
        # "quoted" option (#854)
        ("https://twitter.com/StobiesGalaxy/status/1270755918330896395", {
            "options": (("quoted", False),),
            "pattern": r"https://pbs\.twimg\.com/media/EaK.+\.jpg",
            "count": 4,
        }),
        # TwitPic embeds (#579)
        ("https://twitter.com/i/web/status/112900228289540096", {
            "options": (("twitpic", True),),
            "pattern": r"https://\w+.cloudfront.net/photos/large/\d+.jpg",
            "count": 3,
        }),
        # Nitter tweet
        ("https://nitter.net/ed1conf/status/1163841619336007680", {
            "url": "0f6a841e23948e4320af7ae41125e0c5b3cadc98",
            "content": "f29501e44d88437fe460f5c927b7543fda0f6e34",
        }),
    )

    def __init__(self, match):
        TwitterExtractor.__init__(self, match)
        # group 2 holds the numeric tweet ID from the URL
        self.tweet_id = match.group(2)

    def tweets(self):
        return TwitterAPI(self).tweet(self.tweet_id)
|
2020-01-04 23:46:29 +01:00
|
|
|
|
|
|
|
|
|
|
2020-06-03 20:51:29 +02:00
|
|
|
|
class TwitterAPI():
    """Interface for Twitter's internal web API"""

    def __init__(self, extractor):
        self.extractor = extractor
        self.headers = {
            # public bearer token used by the twitter.com web client
            "authorization": "Bearer AAAAAAAAAAAAAAAAAAAAANRILgAAAAAAnNwIzUejR"
                             "COuH5E6I8xnZz4puTs%3D1Zv7ttfk8LF81IUq16cHjhLTvJu"
                             "4FA33AGWWjCpTnA",
            "x-guest-token": None,
            "x-twitter-client-language": "en",
            "x-twitter-active-user": "yes",
            "x-csrf-token": None,
            "Origin": "https://twitter.com",
            "Referer": "https://twitter.com/",
        }
        # default query parameters for timeline/search endpoints
        self.params = {
            "include_profile_interstitial_type": "1",
            "include_blocking": "1",
            "include_blocked_by": "1",
            "include_followed_by": "1",
            "include_want_retweets": "1",
            "include_mute_edge": "1",
            "include_can_dm": "1",
            "include_can_media_tag": "1",
            "skip_status": "1",
            "cards_platform": "Web-12",
            "include_cards": "1",
            "include_composer_source": "true",
            "include_ext_alt_text": "true",
            "include_reply_count": "1",
            "tweet_mode": "extended",
            "include_entities": "true",
            "include_user_entities": "true",
            "include_ext_media_color": "true",
            "include_ext_media_availability": "true",
            "send_error_codes": "true",
            "simple_quoted_tweet": "true",
            # "count": "20",
            "count": "100",
            "cursor": None,
            "ext": "mediaStats,highlightedLabel,cameraMoment",
            "include_quote_count": "true",
        }

        cookies = self.extractor.session.cookies

        # CSRF: header value and 'ct0' cookie must match
        csrf = hashlib.md5(str(time.time()).encode()).hexdigest()
        self.headers["x-csrf-token"] = csrf
        cookies.set("ct0", csrf, domain=".twitter.com")

        if cookies.get("auth_token", domain=".twitter.com"):
            # logged-in session
            self.headers["x-twitter-auth-type"] = "OAuth2Session"
        else:
            # guest token
            guest_token = self._guest_token()
            self.headers["x-guest-token"] = guest_token
            cookies.set("gt", guest_token, domain=".twitter.com")

    def tweet(self, tweet_id):
        """Return the tweet with 'tweet_id' plus its quoted-tweet chain"""
        endpoint = "2/timeline/conversation/{}.json".format(tweet_id)
        tweets = []
        for tweet in self._pagination(endpoint):
            if tweet["id_str"] == tweet_id:
                tweets.append(tweet)
                # follow quoted tweets recursively through the conversation
                if "quoted_status_id_str" in tweet:
                    tweet_id = tweet["quoted_status_id_str"]
                else:
                    break
        return tweets

    def timeline_profile(self, screen_name):
        """Yield the tweets of a user's profile timeline"""
        user_id = self._user_id_by_screen_name(screen_name)
        endpoint = "2/timeline/profile/{}.json".format(user_id)
        return self._pagination(endpoint)

    def timeline_media(self, screen_name):
        """Yield a user's media tweets"""
        user_id = self._user_id_by_screen_name(screen_name)
        endpoint = "2/timeline/media/{}.json".format(user_id)
        return self._pagination(endpoint)

    def timeline_favorites(self, screen_name):
        """Yield a user's liked tweets"""
        user_id = self._user_id_by_screen_name(screen_name)
        endpoint = "2/timeline/favorites/{}.json".format(user_id)
        return self._pagination(endpoint)

    def timeline_bookmark(self):
        """Yield the authenticated user's bookmarked tweets"""
        endpoint = "2/timeline/bookmark.json"
        return self._pagination(endpoint)

    def search(self, query):
        """Yield the results of a live tweet search for 'query'"""
        endpoint = "2/search/adaptive.json"
        params = self.params.copy()
        params["q"] = query
        params["tweet_search_mode"] = "live"
        params["query_source"] = "typed_query"
        params["pc"] = "1"
        params["spelling_corrections"] = "1"
        # search results use different entry/cursor ID prefixes
        return self._pagination(
            endpoint, params, "sq-I-t-", "sq-cursor-bottom")

    def user_by_screen_name(self, screen_name):
        """Return the user object for 'screen_name'"""
        endpoint = "graphql/-xfUfZsnR_zqjFd-IfrN5A/UserByScreenName"
        params = {
            "variables": '{"screen_name":"' + screen_name + '"'
                         ',"withHighlightedLabel":true}'
        }
        try:
            return self._call(endpoint, params)["data"]["user"]
        except KeyError:
            raise exception.NotFoundError("user")

    def _user_id_by_screen_name(self, screen_name):
        """Resolve 'screen_name' (or an 'id:<uid>' string) to a user ID"""
        if screen_name.startswith("id:"):
            return screen_name[3:]
        return self.user_by_screen_name(screen_name)["rest_id"]

    @cache(maxage=3600)
    def _guest_token(self):
        """Fetch a guest token for unauthenticated API access"""
        endpoint = "1.1/guest/activate.json"
        return self._call(endpoint, None, "POST")["guest_token"]

    def _call(self, endpoint, params, method="GET"):
        """Send an API request and return its parsed JSON response"""
        url = "https://api.twitter.com/" + endpoint
        response = self.extractor.request(
            url, method=method, params=params, headers=self.headers,
            fatal=None)
        if response.status_code < 400:
            return response.json()
        if response.status_code == 429:
            # rate limited: wait until the limit resets, then retry
            until = response.headers.get("x-rate-limit-reset")
            self.extractor.wait(until=until, seconds=(None if until else 60))
            return self._call(endpoint, params, method)

        # collect error messages for the exception text
        try:
            msg = ", ".join(
                '"' + error["message"] + '"'
                for error in response.json()["errors"]
            )
        except Exception:
            msg = response.text
        raise exception.StopExtraction(
            "%s %s (%s)", response.status_code, response.reason, msg)

    def _pagination(self, endpoint, params=None,
                    entry_tweet="tweet-", entry_cursor="cursor-bottom-"):
        """Yield tweets from 'endpoint', following cursors until exhausted"""
        if params is None:
            params = self.params.copy()

        while True:
            cursor = tweet = None
            data = self._call(endpoint, params)

            instr = data["timeline"]["instructions"]
            if not instr:
                return
            tweets = data["globalObjects"]["tweets"]
            users = data["globalObjects"]["users"]

            for entry in instr[0]["addEntries"]["entries"]:

                if entry["entryId"].startswith(entry_tweet):
                    try:
                        tweet = tweets[
                            entry["content"]["item"]["content"]["tweet"]["id"]]
                    except KeyError:
                        # tweet referenced by the timeline no longer exists
                        self.extractor.log.debug(
                            "Skipping %s (deleted)",
                            entry["entryId"][len(entry_tweet):])
                        continue
                    tweet["user"] = users[tweet["user_id_str"]]

                    # attach the original author for retweets
                    if "retweeted_status_id_str" in tweet:
                        retweet = tweets.get(tweet["retweeted_status_id_str"])
                        if retweet:
                            tweet["author"] = users[retweet["user_id_str"]]
                    yield tweet

                    # also yield the quoted tweet, marked with "quoted"
                    if "quoted_status_id_str" in tweet:
                        quoted = tweets.get(tweet["quoted_status_id_str"])
                        if quoted:
                            quoted["author"] = users[quoted["user_id_str"]]
                            quoted["user"] = tweet["user"]
                            quoted["quoted"] = True
                            yield quoted

                elif entry["entryId"].startswith(entry_cursor):
                    cursor = entry["content"]["operation"]["cursor"]
                    if not cursor.get("stopOnEmptyResponse"):
                        # keep going even if there are no tweets
                        tweet = True
                    cursor = cursor["value"]

            # some responses replace the cursor instead of adding entries
            if "replaceEntry" in instr[-1] :
                cursor = (instr[-1]["replaceEntry"]["entry"]
                          ["content"]["operation"]["cursor"]["value"])

            if not cursor or not tweet:
                return
            params["cursor"] = cursor
|