# -*- coding: utf-8 -*-

# Copyright 2016-2019 Mike Fährmann
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.

"""Extract images from https://twitter.com/"""

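# Example invocations, assuming this module ships as part of gallery-dl
# and is driven by its command-line frontend (URLs taken from the test
# cases below):
#
#   gallery-dl "https://twitter.com/supernaturepics"
#   gallery-dl "https://twitter.com/i/web/status/1155074198240292865"
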
from .common import Extractor, Message
from .. import text, exception
from ..cache import cache
import re


class TwitterExtractor(Extractor):
    """Base class for twitter extractors"""
    category = "twitter"
    directory_fmt = ("{category}", "{user}")
    filename_fmt = "{tweet_id}_{num}.{extension}"
    archive_fmt = "{tweet_id}_{retweet_id}_{num}"
    root = "https://twitter.com"
    # image-size suffixes, best quality first; items() appends each of
    # these to an image URL to build a list of fallback download URLs
    sizes = (":orig", ":large", ":medium", ":small")

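    # Options read from the user's configuration. Assuming the usual
    # gallery-dl JSON configuration layout, they would live under
    # extractor.twitter, e.g.
    #   {"extractor": {"twitter": {"retweets": false, "videos": true}}}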
    def __init__(self, match):
        Extractor.__init__(self, match)
        self.user = match.group(1)
        self.retweets = self.config("retweets", True)
        self.content = self.config("content", False)
        self.videos = self.config("videos", False)

        if self.content:
            # pre-compile a substitution that replaces emoji <img> tags
            # with their "alt" text
            self._emoji_sub = re.compile(
                r'<img class="Emoji [^>]+ alt="([^"]+)"[^>]*>').sub

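    # items() implements the extractor protocol: one Directory message
    # with general metadata, then a URL message per downloadable file.
    # Message.Urllist carries a list of candidate URLs for the same image
    # (here: one per size suffix); the downloader is expected to try them
    # in order and keep the first one that works.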
    def items(self):
        self.login()
        yield Message.Version, 1
        yield Message.Directory, self.metadata()

        for tweet in self.tweets():
            data = self._data_from_tweet(tweet)

            if not self.retweets and data["retweet_id"]:
                continue

            images = text.extract_iter(
                tweet, 'data-image-url="', '"')
            for data["num"], url in enumerate(images, 1):
                text.nameext_from_url(url, data)
                urls = [url + size for size in self.sizes]
                yield Message.Urllist, urls, data

            if self.videos and "-videoContainer" in tweet:
                # delegate video extraction to youtube-dl via the
                # "ytdl:" URL prefix
                data["num"] = 1
                data["extension"] = None
                url = "ytdl:{}/{}/status/{}".format(
                    self.root, data["user"], data["tweet_id"])
                yield Message.Url, url, data

    def metadata(self):
        """Return general metadata"""
        return {"user": self.user}

    def tweets(self):
        """Yield HTML content of all relevant tweets"""

    def login(self):
        username, password = self._get_auth_info()
        if username:
            self._update_cookies(self._login_impl(username, password))

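    # cache the session cookies returned by a successful login for up to
    # 360 days, keyed by username, so later runs skip the login roundtrip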
    @cache(maxage=360*24*3600, keyarg=1)
    def _login_impl(self, username, password):
        self.log.info("Logging in as %s", username)

        # the login form embeds a CSRF token; its value attribute appears
        # before the name attribute in the markup, hence the backwards
        # offset when extracting it
        page = self.request(self.root + "/login").text
        pos = page.index('name="authenticity_token"')
        token = text.extract(page, 'value="', '"', pos-80)[0]

        url = self.root + "/sessions"
        data = {
            "session[username_or_email]": username,
            "session[password]"         : password,
            "authenticity_token"        : token,
            "ui_metrics"                : '{"rf":{},"s":""}',
            "scribe_log"                : "",
            "redirect_after_login"      : "",
            "remember_me"               : "1",
        }
        response = self.request(url, method="POST", data=data)

        if "/error" in response.url:
            raise exception.AuthenticationError()
        return self.session.cookies

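    # _data_from_tweet() reads the data-* attributes of a tweet's
    # container element. A rough sketch of that markup, reconstructed
    # from the extraction patterns below (not verbatim Twitter output):
    #   <div class="tweet ..." data-tweet-id="..." data-screen-name="..."
    #        data-name="..." data-user-id="..." data-time="...">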
    def _data_from_tweet(self, tweet):
        extr = text.extract_from(tweet)
        data = {
            "tweet_id"  : text.parse_int(extr('data-tweet-id="'  , '"')),
            "retweet_id": text.parse_int(extr('data-retweet-id="', '"')),
            "retweeter" : extr('data-retweeter="'  , '"'),
            "user"      : extr('data-screen-name="', '"'),
            "username"  : extr('data-name="'       , '"'),
            "user_id"   : text.parse_int(extr('data-user-id="'   , '"')),
            "date"      : text.parse_timestamp(extr('data-time="', '"')),
        }
        if self.content:
            content = extr('<div class="js-tweet-text-container">', '\n</div>')
            if '<img class="Emoji ' in content:
                content = self._emoji_sub(r"\1", content)
            content = text.unescape(text.remove_html(content, "", ""))
            # strip a trailing pic.twitter.com short link, if any
            cl, _, cr = content.rpartition("pic.twitter.com/")
            data["content"] = cl if cl and len(cr) < 16 else content
        return data

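    # The timeline endpoints return JSON with pre-rendered tweet HTML in
    # "items_html" plus a pagination cursor; fetching continues with an
    # updated "max_position" parameter until "has_more_items" turns false
    # or the cursor stops advancing.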
    def _tweets_from_api(self, url):
        params = {
            "include_available_features": "1",
            "include_entities": "1",
            "reset_error_state": "false",
            "lang": "en",
        }
        headers = {
            "X-Requested-With": "XMLHttpRequest",
            "X-Twitter-Active-User": "yes",
            "Referer": "{}/{}".format(self.root, self.user),
        }

        while True:
            data = self.request(url, params=params, headers=headers).json()
            if "inner" in data:
                data = data["inner"]

            for tweet in text.extract_iter(
                    data["items_html"], '<div class="tweet ', '\n</li>'):
                yield tweet

            if not data["has_more_items"]:
                return

            if "min_position" in data:
                position = data["min_position"]
                if "max_position" in params and \
                        position == params["max_position"]:
                    return
            else:
                # fall back to the ID of the last tweet seen
                position = text.parse_int(text.extract(
                    tweet, 'data-tweet-id="', '"')[0])
                if "max_position" in params and \
                        position >= params["max_position"]:
                    return

            params["max_position"] = position


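# The concrete extractors below differ only in their URL pattern and in
# which endpoint their tweets() method queries.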
class TwitterTimelineExtractor(TwitterExtractor):
    """Extractor for all images from a user's timeline"""
    subcategory = "timeline"
    pattern = (r"(?:https?://)?(?:www\.|mobile\.)?twitter\.com"
               r"/((?!search)[^/?&#]+)/?(?:$|[?#])")
    test = (
        ("https://twitter.com/supernaturepics", {
            "range": "1-40",
            "url": "0106229d408f4111d9a52c8fd2ad687f64842aa4",
            "keyword": "7210d679606240405e0cf62cbc67596e81a7a250",
        }),
        ("https://mobile.twitter.com/supernaturepics?p=i"),
    )

    def tweets(self):
        url = "{}/i/profiles/show/{}/timeline/tweets".format(
            self.root, self.user)
        return self._tweets_from_api(url)


class TwitterMediaExtractor(TwitterExtractor):
    """Extractor for all images from a user's Media Tweets"""
    subcategory = "media"
    pattern = (r"(?:https?://)?(?:www\.|mobile\.)?twitter\.com"
               r"/((?!search)[^/?&#]+)/media(?!\w)")
    test = (
        ("https://twitter.com/supernaturepics/media", {
            "range": "1-40",
            "url": "0106229d408f4111d9a52c8fd2ad687f64842aa4",
        }),
        ("https://mobile.twitter.com/supernaturepics/media#t"),
    )

    def tweets(self):
        url = "{}/i/profiles/show/{}/media_timeline".format(
            self.root, self.user)
        return self._tweets_from_api(url)


class TwitterSearchExtractor(TwitterExtractor):
    """Extractor for all images from a search timeline"""
    subcategory = "search"
    pattern = (r"(?:https?://)?(?:www\.|mobile\.)?twitter\.com"
               r"/search[^q]+q=([^/?&#]+)(?:$|&)")
    test = ()

    def tweets(self):
        # for searches, self.user holds the url-encoded query string
        url = "{}/i/search/timeline?f=tweets&q={}".format(
            self.root, self.user)
        return self._tweets_from_api(url)


class TwitterTweetExtractor(TwitterExtractor):
    """Extractor for images from individual tweets"""
    subcategory = "tweet"
    pattern = (r"(?:https?://)?(?:www\.|mobile\.)?twitter\.com"
               r"/([^/?&#]+|i/web)/status/(\d+)")
    test = (
        ("https://twitter.com/supernaturepics/status/604341487988576256", {
            "url": "0e801d2f98142dd87c3630ded9e4be4a4d63b580",
            "keyword": "1b8afb93cc04a9f44d89173f8facc61c3a6caf91",
            "content": "ab05e1d8d21f8d43496df284d31e8b362cd3bcab",
        }),
        # 4 images
        ("https://twitter.com/perrypumas/status/894001459754180609", {
            "url": "c8a262a9698cb733fb27870f5a8f75faf77d79f6",
            "keyword": "43d98ab448193f0d4f30aa571a4b6bda9b6a5692",
        }),
        # video
        ("https://twitter.com/perrypumas/status/1065692031626829824", {
            "options": (("videos", True),),
            "pattern": r"ytdl:https://twitter.com/perrypumas/status/\d+",
        }),
        # content with emoji, newlines, hashtags (#338)
        ("https://twitter.com/yumi_san0112/status/1151144618936823808", {
            "options": (("content", True),),
            "keyword": "b13b6c4cd0b0c15b2ea7685479e7fedde3c47b9e",
        }),
        # Reply to another tweet (#403)
        ("https://twitter.com/tyson_hesse/status/1103767554424598528", {
            "options": (("videos", True),),
            "pattern": r"ytdl:https://twitter.com/.*/1103767554424598528$",
        }),
        # /i/web/ URL
        ("https://twitter.com/i/web/status/1155074198240292865", {
            "pattern": r"https://pbs.twimg.com/media/EAel0vUUYAAZ4Bq.jpg:orig",
        }),
    )

    def __init__(self, match):
        TwitterExtractor.__init__(self, match)
        self.tweet_id = match.group(2)

    def metadata(self):
        return {"user": self.user, "tweet_id": self.tweet_id}

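    # Slice out only the target tweet's own HTML: locate the stats
    # container that follows the tweet body, then search backwards for
    # the tweet's opening <div>. This skips the parent tweet when the
    # target is a reply (see the #403 test above). Session cookies are
    # cleared first so the request is sent without any stored state.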
    def tweets(self):
        self.session.cookies.clear()
        url = "{}/i/web/status/{}".format(self.root, self.tweet_id)
        page = self.request(url).text
        end = page.index('class="js-tweet-stats-container')
        beg = page.rindex('<div class="tweet ', 0, end)
        return (page[beg:end],)