# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    js_to_json,
    parse_filesize,
    urlencode_postdata,
    urljoin,
)


class ZoomIE(InfoExtractor):
    IE_NAME = 'zoom'
    _VALID_URL = r'(?P<base_url>https?://(?:[^.]+\.)?zoom\.us/)rec(?:ording)?/(?:play|share)/(?P<id>[A-Za-z0-9_.-]+)'
    _TEST = {
        'url': 'https://economist.zoom.us/rec/play/dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5',
        'md5': 'ab445e8c911fddc4f9adc842c2c5d434',
        'info_dict': {
            'id': 'dUk_CNBETmZ5VA2BwEl-jjakPpJ3M1pcfVYAPRsoIbEByGsLjUZtaa4yCATQuOL3der8BlTwxQePl_j0.EImBkXzTIaPvdZO5',
            'ext': 'mp4',
            'title': 'China\'s "two sessions" and the new five-year plan',
        }
    }

    def _real_extract(self, url):
        base_url, play_id = self._match_valid_url(url).groups()
        webpage = self._download_webpage(url, play_id)
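        # A passcode-protected recording serves a hidden 'password_form' instead
        # of the player page; validate the passcode first, then re-fetch the page.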
        try:
            form = self._form_hidden_inputs('password_form', webpage)
        except ExtractorError:
            form = None
        if form:
            password = self.get_param('videopassword')
            if not password:
                raise ExtractorError(
                    'This video is protected by a passcode, use the --video-password option', expected=True)
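            # Validate the passcode against Zoom's rec/validate(_meet)_passwd
            # endpoint using the id and action from the hidden form, then
            # re-fetch the page, which should now contain the player data.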
            is_meeting = form.get('useWhichPasswd') == 'meeting'
            validation = self._download_json(
                base_url + 'rec/validate%s_passwd' % ('_meet' if is_meeting else ''),
                play_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({
                    'id': form[('meet' if is_meeting else 'file') + 'Id'],
                    'passwd': password,
                    'action': form.get('action'),
                }))
            if not validation.get('status'):
                raise ExtractorError(validation['errorMessage'], expected=True)
            webpage = self._download_webpage(url, play_id)
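        # The playback metadata is embedded as a JavaScript object assigned to
        # window.__data__; js_to_json converts it so it can be parsed as JSON.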
        data = self._parse_json(self._search_regex(
            r'(?s)window\.__data__\s*=\s*({.+?});',
            webpage, 'data'), play_id, js_to_json)
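        # Transcript and closed-caption tracks, when present, are separate VTT files.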
        subtitles = {}
        for _type in ('transcript', 'cc'):
            if data.get('%sUrl' % _type):
                subtitles[_type] = [{
                    'url': urljoin(base_url, data['%sUrl' % _type]),
                    'ext': 'vtt',
                }]
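        # 'viewResolvtions*' is spelled as it appears in Zoom's __data__ object;
        # the mp4 is requested with the recording's base URL as the Referer.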
        return {
            'id': play_id,
            'title': data['topic'],
            'url': data['viewMp4Url'],
            'subtitles': subtitles,
            'width': int_or_none(data.get('viewResolvtionsWidth')),
            'height': int_or_none(data.get('viewResolvtionsHeight')),
            'http_headers': {
                'Referer': base_url,
            },
            'filesize_approx': parse_filesize(data.get('fileSize')),
        }