
[nfb] Modernize and extract subtitles

Sergey M․ 2016-05-18 00:25:15 +06:00
parent 15cda1ef77
commit 11e6a0b641
No known key found for this signature in database
GPG Key ID: 2C393E0F18A9236D


@@ -2,8 +2,12 @@
 
 from .common import InfoExtractor
 from ..utils import (
-    sanitized_Request,
+    clean_html,
+    determine_ext,
+    int_or_none,
+    qualities,
     urlencode_postdata,
+    xpath_text,
 )
 
 
@@ -16,12 +20,12 @@ class NFBIE(InfoExtractor):
         'url': 'https://www.nfb.ca/film/qallunaat_why_white_people_are_funny',
         'info_dict': {
             'id': 'qallunaat_why_white_people_are_funny',
-            'ext': 'mp4',
+            'ext': 'flv',
             'title': 'Qallunaat! Why White People Are Funny ',
-            'description': 'md5:836d8aff55e087d04d9f6df554d4e038',
+            'description': 'md5:6b8e32dde3abf91e58857b174916620c',
             'duration': 3128,
+            'creator': 'Mark Sandiford',
             'uploader': 'Mark Sandiford',
-            'uploader_id': 'mark-sandiford',
         },
         'params': {
             # rtmp download
@@ -31,64 +35,78 @@ class NFBIE(InfoExtractor):
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        page = self._download_webpage(
-            'https://www.nfb.ca/film/%s' % video_id, video_id,
-            'Downloading film page')
 
-        uploader_id = self._html_search_regex(r'<a class="director-link" href="/explore-all-directors/([^/]+)/"',
-                                              page, 'director id', fatal=False)
-        uploader = self._og_search_property('video:director', page, 'director name')
-
-        request = sanitized_Request(
+        config = self._download_xml(
             'https://www.nfb.ca/film/%s/player_config' % video_id,
-            urlencode_postdata({'getConfig': 'true'}))
-        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
-        request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
+            video_id, 'Downloading player config XML',
+            data=urlencode_postdata({'getConfig': 'true'}),
+            headers={
+                'Content-Type': 'application/x-www-form-urlencoded',
+                'X-NFB-Referer': 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf'
+            })
 
-        config = self._download_xml(request, video_id, 'Downloading player config XML')
-
-        title = None
-        description = None
-        thumbnail = None
-        duration = None
-        formats = []
+        title, description, thumbnail, duration, uploader, author = [None] * 6
+        thumbnails, formats = [[]] * 2
+        subtitles = {}
 
-        def extract_thumbnail(media):
-            thumbnails = {}
-            for asset in media.findall('assets/asset'):
-                thumbnails[asset.get('quality')] = asset.find('default/url').text
-            if not thumbnails:
-                return None
-            if 'high' in thumbnails:
-                return thumbnails['high']
-            return list(thumbnails.values())[0]
-
         for media in config.findall('./player/stream/media'):
             if media.get('type') == 'posterImage':
-                thumbnail = extract_thumbnail(media)
-            elif media.get('type') == 'video':
-                duration = int(media.get('duration'))
-                title = media.find('title').text
-                description = media.find('description').text
-                # It seems assets always go from lower to better quality, so no need to sort
+                quality_key = qualities(('low', 'high'))
+                thumbnails = []
                 for asset in media.findall('assets/asset'):
-                    for x in asset:
+                    asset_url = xpath_text(asset, 'default/url', default=None)
+                    if not asset_url:
+                        continue
+                    quality = asset.get('quality')
+                    thumbnails.append({
+                        'url': asset_url,
+                        'id': quality,
+                        'preference': quality_key(quality),
+                    })
+            elif media.get('type') == 'video':
+                title = xpath_text(media, 'title', fatal=True)
+                for asset in media.findall('assets/asset'):
+                    quality = asset.get('quality')
+                    height = int_or_none(self._search_regex(
+                        r'^(\d+)[pP]$', quality or '', 'height', default=None))
+                    for node in asset:
+                        streamer = xpath_text(node, 'streamerURI', default=None)
+                        if not streamer:
+                            continue
+                        play_path = xpath_text(node, 'url', default=None)
+                        if not play_path:
+                            continue
                         formats.append({
-                            'url': x.find('streamerURI').text,
-                            'app': x.find('streamerURI').text.split('/', 3)[3],
-                            'play_path': x.find('url').text,
+                            'url': streamer,
+                            'app': streamer.split('/', 3)[3],
+                            'play_path': play_path,
                             'rtmp_live': False,
-                            'ext': 'mp4',
-                            'format_id': '%s-%s' % (x.tag, asset.get('quality')),
+                            'ext': 'flv',
+                            'format_id': '%s-%s' % (node.tag, quality) if quality else node.tag,
+                            'height': height,
                         })
+                self._sort_formats(formats)
+                description = clean_html(xpath_text(media, 'description'))
+                uploader = xpath_text(media, 'author')
+                duration = int_or_none(media.get('duration'))
+                for subtitle in media.findall('./subtitles/subtitle'):
+                    subtitle_url = xpath_text(subtitle, 'url', default=None)
+                    if not subtitle_url:
+                        continue
+                    lang = xpath_text(subtitle, 'lang', default='en')
+                    subtitles.setdefault(lang, []).append({
+                        'url': subtitle_url,
+                        'ext': (subtitle.get('format') or determine_ext(subtitle_url)).lower(),
+                    })
 
         return {
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnails': thumbnails,
             'duration': duration,
+            'creator': uploader,
             'uploader': uploader,
-            'uploader_id': uploader_id,
             'formats': formats,
+            'subtitles': subtitles,
        }
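
The subtitle support named in the commit title comes down to mapping each ./subtitles/subtitle node from the player config onto the language-keyed 'subtitles' dict that the extractor now returns. Below is a minimal, self-contained sketch of that mapping run against a made-up <media> fragment: the element names follow the paths used in the diff, but the sample URLs and values are invented, and a plain string split stands in for the utils.determine_ext() fallback.

import xml.etree.ElementTree as ET

# Invented sample; the real data comes from the player_config endpoint POSTed above.
SAMPLE_MEDIA = """
<media type="video" duration="3128">
  <subtitles>
    <subtitle format="SRT">
      <lang>en</lang>
      <url>http://example.invalid/subs_en.srt</url>
    </subtitle>
    <subtitle>
      <lang>fr</lang>
      <url>http://example.invalid/subs_fr.vtt</url>
    </subtitle>
  </subtitles>
</media>
"""

media = ET.fromstring(SAMPLE_MEDIA)
subtitles = {}

for subtitle in media.findall('./subtitles/subtitle'):
    subtitle_url = subtitle.findtext('url')
    if not subtitle_url:
        continue
    # Default to English when no <lang> element is present, as the diff does.
    lang = subtitle.findtext('lang') or 'en'
    # Prefer the explicit format attribute, else fall back to the URL extension
    # (the extractor uses utils.determine_ext() for that fallback).
    ext = (subtitle.get('format') or subtitle_url.rpartition('.')[-1]).lower()
    subtitles.setdefault(lang, []).append({'url': subtitle_url, 'ext': ext})

print(subtitles)
# {'en': [{'url': 'http://example.invalid/subs_en.srt', 'ext': 'srt'}],
#  'fr': [{'url': 'http://example.invalid/subs_fr.vtt', 'ext': 'vtt'}]}

Grouping entries by language code, each carrying a url and ext, is the standard shape of the subtitles field in the returned info dict, which is why the diff builds it with setdefault(lang, []).append(...) instead of a flat list.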