2019-02-16 22:56:04 +01:00
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
|
2023-02-07 23:14:53 +01:00
|
|
|
# Copyright 2019-2023 Mike Fährmann
|
2019-02-16 22:56:04 +01:00
|
|
|
#
|
|
|
|
# This program is free software; you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License version 2 as
|
|
|
|
# published by the Free Software Foundation.
|
|
|
|
|
|
|
|
"""Extractors for https://www.weibo.com/"""
|
|
|
|
|
|
|
|
from .common import Extractor, Message
|
2023-02-07 23:14:53 +01:00
|
|
|
from .. import text, util, exception
|
2022-05-31 11:57:45 +02:00
|
|
|
from ..cache import cache
|
|
|
|
import random
|
2019-02-16 22:56:04 +01:00
|
|
|
|
2022-06-03 16:36:22 +02:00
|
|
|
BASE_PATTERN = r"(?:https?://)?(?:www\.|m\.)?weibo\.c(?:om|n)"
|
|
|
|
USER_PATTERN = BASE_PATTERN + r"/(?:(u|n|p(?:rofile)?)/)?([^/?#]+)(?:/home)?"
|
|
|
|
|
2019-02-16 22:56:04 +01:00
|
|
|
|
|
|
|
class WeiboExtractor(Extractor):
    """Base class for weibo extractors"""
    category = "weibo"
    directory_fmt = ("{category}", "{user[screen_name]}")
    filename_fmt = "{status[id]}_{num:>02}.{extension}"
    archive_fmt = "{status[id]}_{num}"
    root = "https://weibo.com"
    request_interval = (1.0, 2.0)

    def __init__(self, match):
        Extractor.__init__(self, match)
        # URL prefix ('u', 'n', 'p'/'profile', or None) and the raw
        # user identifier captured by USER_PATTERN
        self._prefix, self.user = match.groups()

    def _init(self):
        self.retweets = self.config("retweets", True)
        self.videos = self.config("videos", True)
        self.livephoto = self.config("livephoto", True)

        # reuse previously generated visitor cookies, if any
        cookies = _cookie_cache()
        if cookies is not None:
            self.cookies.update(cookies)

    def request(self, url, **kwargs):
        """Send an HTTP request; detect login and visitor-system redirects"""
        response = Extractor.request(self, url, **kwargs)

        if response.history:
            if "login.sina.com" in response.url:
                raise exception.StopExtraction(
                    "HTTP redirect to login page (%s)",
                    response.url.partition("?")[0])
            if "passport.weibo.com" in response.url:
                # acquire visitor cookies, then retry the original request
                self._sina_visitor_system(response)
                response = Extractor.request(self, url, **kwargs)

        return response

    def items(self):
        original_retweets = (self.retweets == "original")

        for status in self.statuses():

            files = []
            if self.retweets and "retweeted_status" in status:
                if original_retweets:
                    # keep only the retweeted (original) status
                    status = status["retweeted_status"]
                    self._extract_status(status, files)
                else:
                    # collect files from both retweet and its source
                    self._extract_status(status, files)
                    self._extract_status(status["retweeted_status"], files)
            else:
                self._extract_status(status, files)

            status["date"] = text.parse_datetime(
                status["created_at"], "%a %b %d %H:%M:%S %z %Y")
            status["count"] = len(files)
            yield Message.Directory, status

            for num, file in enumerate(files, 1):
                if file["url"].startswith("http:"):
                    file["url"] = "https:" + file["url"][5:]
                if "filename" not in file:
                    text.nameext_from_url(file["url"], file)
                    if file["extension"] == "json":
                        # some video URLs end in '.json'; the payload
                        # delivered for them is an MP4 video
                        file["extension"] = "mp4"
                file["status"] = status
                file["num"] = num
                yield Message.Url, file["url"], file

    def _extract_status(self, status, files):
        """Collect the media files of 'status' into the 'files' list"""
        append = files.append

        if "mix_media_info" in status:
            # mixed media post: pictures and videos in one item list
            for item in status["mix_media_info"]["items"]:
                type = item.get("type")
                if type == "video":
                    if self.videos:
                        append(self._extract_video(item["data"]["media_info"]))
                elif type == "pic":
                    append(item["data"]["largest"].copy())
                else:
                    self.log.warning("Unknown media type '%s'", type)
            return

        pic_ids = status.get("pic_ids")
        if pic_ids:
            pics = status["pic_infos"]
            for pic_id in pic_ids:
                pic = pics[pic_id]
                pic_type = pic.get("type")

                if pic_type == "gif" and self.videos:
                    append({"url": pic["video"]})

                elif pic_type == "livephoto" and self.livephoto:
                    append(pic["largest"].copy())

                    # also queue the short video part of the live photo;
                    # derive its name from the URL-encoded path component
                    # (key was misspelled 'filehame' before, which made
                    # items() overwrite these values via nameext_from_url)
                    file = {"url": pic["video"]}
                    file["filename"], _, file["extension"] = \
                        pic["video"].rpartition("%2F")[2].rpartition(".")
                    append(file)

                else:
                    append(pic["largest"].copy())

        if "page_info" in status:
            info = status["page_info"]
            if "media_info" in info and self.videos:
                append(self._extract_video(info["media_info"]))

    def _extract_video(self, info):
        """Return a file dict for the best available video variant"""
        try:
            # pick the playback variant with the highest quality index
            media = max(info["playback_list"],
                        key=lambda m: m["meta"]["quality_index"])
        except Exception:
            # no playback list available; fall back to direct stream URLs
            return {"url": (info.get("stream_url_hd") or
                            info.get("stream_url") or "")}
        else:
            return media["play_info"].copy()

    def _status_by_id(self, status_id):
        """Fetch a single status by its ID"""
        url = "{}/ajax/statuses/show?id={}".format(self.root, status_id)
        return self.request(url).json()

    def _user_id(self):
        """Return the numeric user ID for self.user, resolving names"""
        if len(self.user) >= 10 and self.user.isdecimal():
            # already a numeric ID; keep only its last 10 digits
            return self.user[-10:]
        else:
            url = "{}/ajax/profile/info?{}={}".format(
                self.root,
                "screen_name" if self._prefix == "n" else "custom",
                self.user)
            return self.request(url).json()["data"]["user"]["idstr"]

    def _pagination(self, endpoint, params):
        """Yield statuses from an '/ajax' API endpoint over all pages"""
        url = self.root + "/ajax" + endpoint
        headers = {
            "X-Requested-With": "XMLHttpRequest",
            "X-XSRF-TOKEN": None,
            "Referer": "{}/u/{}".format(self.root, params["uid"]),
        }

        while True:
            response = self.request(url, params=params, headers=headers)
            headers["Accept"] = "application/json, text/plain, */*"
            headers["X-XSRF-TOKEN"] = response.cookies.get("XSRF-TOKEN")

            data = response.json()
            if not data.get("ok"):
                self.log.debug(response.content)
                if "since_id" not in params:  # first iteration
                    raise exception.StopExtraction(
                        '"%s"', data.get("msg") or "unknown error")

            data = data["data"]
            statuses = data["list"]
            if not statuses:
                return
            yield from statuses

            # advance to the next page; the pagination scheme differs
            # per endpoint
            if "next_cursor" in data:        # videos, newvideo
                if data["next_cursor"] == -1:
                    return
                params["cursor"] = data["next_cursor"]
            elif "page" in params:           # home, article
                params["page"] += 1
            elif data["since_id"]:           # album
                params["sinceid"] = data["since_id"]
            else:                            # feed, last album page
                try:
                    params["since_id"] = statuses[-1]["id"] - 1
                except KeyError:
                    return

    def _sina_visitor_system(self, response):
        """Generate visitor cookies and store them in the cookie cache"""
        self.log.info("Sina Visitor System")

        passport_url = "https://passport.weibo.com/visitor/genvisitor"
        headers = {"Referer": response.url}
        data = {
            "cb": "gen_callback",
            "fp": '{"os":"1","browser":"Gecko109,0,0,0","fonts":"undefined",'
                  '"screenInfo":"1920*1080*24","plugins":""}',
        }

        # use Extractor.request() directly to bypass the redirect
        # handling of self.request()
        page = Extractor.request(
            self, passport_url, method="POST", headers=headers, data=data).text
        data = util.json_loads(text.extr(page, "(", ");"))["data"]

        passport_url = "https://passport.weibo.com/visitor/visitor"
        params = {
            "a"    : "incarnate",
            "t"    : data["tid"],
            "w"    : "3" if data.get("new_tid") else "2",
            "c"    : "{:>03}".format(data.get("confidence") or 100),
            "gc"   : "",
            "cb"   : "cross_domain",
            "from" : "weibo",
            "_rand": random.random(),
        }
        response = Extractor.request(self, passport_url, params=params)
        _cookie_cache.update("", response.cookies)
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboUserExtractor(WeiboExtractor):
    """Extractor for weibo user profiles"""
    subcategory = "user"
    pattern = USER_PATTERN + r"(?:$|#)"
    example = "https://weibo.com/USER"

    def items(self):
        # delegate to the per-tab extractors; 'feed' is the default
        base = "{}/u/{}?tabtype=".format(self.root, self._user_id())
        extractors = (
            (WeiboHomeExtractor, base + "home"),
            (WeiboFeedExtractor, base + "feed"),
            (WeiboVideosExtractor, base + "video"),
            (WeiboNewvideoExtractor, base + "newVideo"),
            (WeiboAlbumExtractor, base + "album"),
        )
        return self._dispatch_extractors(extractors, ("feed",))
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboHomeExtractor(WeiboExtractor):
    """Extractor for weibo 'home' listings"""
    subcategory = "home"
    pattern = USER_PATTERN + r"\?tabtype=home"
    example = "https://weibo.com/USER?tabtype=home"

    def statuses(self):
        # 'myhot' endpoint with page-based pagination
        params = {"uid": self._user_id(), "page": 1, "feature": "2"}
        return self._pagination("/profile/myhot", params)
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboFeedExtractor(WeiboExtractor):
    """Extractor for weibo user feeds"""
    subcategory = "feed"
    pattern = USER_PATTERN + r"\?tabtype=feed"
    example = "https://weibo.com/USER?tabtype=feed"

    def statuses(self):
        # since_id-based pagination (no 'page' parameter)
        params = {"uid": self._user_id(), "feature": "0"}
        return self._pagination("/statuses/mymblog", params)
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboVideosExtractor(WeiboExtractor):
    """Extractor for weibo 'video' listings"""
    subcategory = "videos"
    pattern = USER_PATTERN + r"\?tabtype=video"
    example = "https://weibo.com/USER?tabtype=video"

    def statuses(self):
        # each result wraps the actual status in 'video_detail_vo'
        params = {"uid": self._user_id()}
        for item in self._pagination("/profile/getprofilevideolist", params):
            yield item["video_detail_vo"]
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboNewvideoExtractor(WeiboExtractor):
    """Extractor for weibo 'newVideo' listings"""
    subcategory = "newvideo"
    pattern = USER_PATTERN + r"\?tabtype=newVideo"
    example = "https://weibo.com/USER?tabtype=newVideo"

    def statuses(self):
        # cursor-based pagination handled by _pagination()
        params = {"uid": self._user_id()}
        return self._pagination("/profile/getWaterFallContent", params)
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboArticleExtractor(WeiboExtractor):
    """Extractor for weibo 'article' listings"""
    subcategory = "article"
    pattern = USER_PATTERN + r"\?tabtype=article"
    example = "https://weibo.com/USER?tabtype=article"

    def statuses(self):
        # same endpoint as 'feed', but feature 10 with page pagination
        params = {"uid": self._user_id(), "page": 1, "feature": "10"}
        return self._pagination("/statuses/mymblog", params)
|
|
|
|
|
|
|
|
|
|
|
|
class WeiboAlbumExtractor(WeiboExtractor):
    """Extractor for weibo 'album' listings"""
    subcategory = "album"
    pattern = USER_PATTERN + r"\?tabtype=album"
    example = "https://weibo.com/USER?tabtype=album"

    def statuses(self):
        params = {"uid": self._user_id()}

        # the image wall lists one entry per image, so several entries
        # may refer to the same status; deduplicate by status ID
        processed = set()
        for image in self._pagination("/profile/getImageWall", params):
            mid = image["mid"]
            if mid in processed:
                continue
            processed.add(mid)

            status = self._status_by_id(mid)
            if status.get("ok") == 1:
                yield status
            else:
                self.log.debug("Skipping status %s (%s)", mid, status)
|
2019-02-16 22:56:04 +01:00
|
|
|
|
|
|
|
|
|
|
|
class WeiboStatusExtractor(WeiboExtractor):
    """Extractor for images from a status on weibo.cn"""
    subcategory = "status"
    pattern = BASE_PATTERN + r"/(detail|status|\d+)/(\w+)"
    example = "https://weibo.com/detail/12345"

    def statuses(self):
        status = self._status_by_id(self.user)
        if status.get("ok") == 1:
            return (status,)
        self.log.debug(status)
        raise exception.NotFoundError("status")
|
2022-05-31 11:57:45 +02:00
|
|
|
|
|
|
|
|
2022-11-30 11:36:46 +01:00
|
|
|
@cache(maxage=365*86400)
def _cookie_cache():
    # Cache slot for the Sina Visitor System cookies (kept for one year).
    # Returns None until _sina_visitor_system() stores cookies via
    # _cookie_cache.update().
    return None
|