mirror of https://github.com/yt-dlp/yt-dlp.git synced 2024-11-20 01:42:50 +01:00

Incorporate changes suggested during code review

Frank Aurich 2022-12-12 16:28:15 +01:00
parent 0408452f4b
commit 5d9a4a223c


@@ -1,19 +1,15 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
-    int_or_none,
     parse_duration,
-    parse_iso8601
+    parse_iso8601,
+    traverse_obj
 )
 
 
 class KikaIE(InfoExtractor):
     IE_DESC = 'KiKA.de'
-    _VALID_URL = r'https?://(?:www\.)?kika\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)(?:_.+?)?'
+    _VALID_URL = r'https?://(?:www\.)?kika\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)'
     _GEO_COUNTRIES = ['DE']
 
     _TESTS = [{
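
The tightened _VALID_URL drops the trailing (?:_.+?)? group; since that group was optional and nothing followed it, it changed neither which URLs match nor what the named id group captures. A quick sanity check of the new pattern (the URL below is a made-up placeholder in the kika.de style, not one of the extractor's test cases):

import re

_VALID_URL = r'https?://(?:www\.)?kika\.de/(?:.*)/[a-z-]+-?(?P<id>\d+)'

# Hypothetical URL, purely for illustration
url = 'https://www.kika.de/some-show/videos/some-video-12345'
match = re.match(_VALID_URL, url)
print(match.group('id'))  # -> 12345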
@@ -35,13 +31,8 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
 
         doc = self._download_json("https://www.kika.de/_next-api/proxy/v1/videos/video%s" % (video_id), video_id)
-        title = doc.get('title')
-        timestamp = parse_iso8601(doc.get('date'))
-        duration = parse_duration(doc.get('duration'))
-
-        video_url = doc.get('assets').get('url')
-        video_assets = self._download_json(video_url, video_id)
-        formats = self._extract_formats(video_assets, video_id)
+        video_assets = self._download_json(doc.get('assets').get('url'), video_id)
+        formats = list(self._extract_formats(video_assets, video_id))
 
         subtitles = {}
         ttml_resource = video_assets.get('videoSubtitle')
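
The list(...) wrapper goes together with the next hunk: _extract_formats now yields format dicts instead of building and returning a list, so the caller materializes the generator once. A minimal sketch of that pattern with stand-in names (not the extractor's real data):

def extract_formats(assets):
    # Generator variant: yield one format dict per usable asset
    for asset in assets:
        url = asset.get('url')
        if not url:
            continue
        yield {'url': url}

# The caller turns the generator into a list, as the diff does with list(...)
formats = list(extract_formats([{'url': 'https://example.com/a.mp4'}, {}]))
print(formats)  # -> [{'url': 'https://example.com/a.mp4'}]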
@@ -52,42 +43,40 @@ def _real_extract(self, url):
             }]
         webvtt_resource = video_assets.get('webvttUrl')
         if webvtt_resource:
-            vtt = {
+            subtitles.setdefault('de', []).append({
                 'url': webvtt_resource,
-                'ext': 'webvtt'
-            }
-            subtitles['de'].append(vtt)
+                'ext': 'vtt'
+            })
 
         return {
             'id': video_id,
-            'title': title,
-            'description': doc['description'],
-            'timestamp': timestamp,
-            'duration': duration,
+            'title': doc.get('title'),
+            'description': doc.get('description'),
+            'timestamp': parse_iso8601(doc.get('date')),
+            'duration': parse_duration(doc.get('duration')),
             'formats': formats,
             'subtitles': subtitles,
             'uploader': 'KIKA'
         }
 
     def _extract_formats(self, media_info, video_id):
-        streams = media_info.get('assets', [])
-        formats = []
-        for num, media in enumerate(streams):
-            stream_url = media.get("url")
+        for media in media_info['assets']:
+            stream_url = media.get('url')
+            if not stream_url:
+                continue
             ext = determine_ext(stream_url)
             if ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(
-                    stream_url, video_id, 'mp4', 'm3u8_native',
-                    m3u8_id='hls', fatal=False))
+                yield from self._extract_m3u8_formats(
+                    stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False)
             else:
-                f = {
+                yield {
                     'url': stream_url,
-                    'format_id': 'a%s-%s' % (num, ext),
-                    'width': media.get('frameWidth'),
-                    'height': media.get('frameHeight'),
-                    'filesize': int_or_none(media.get('fileSize')),
-                    'abr': int_or_none(media.get('bitrateAudio')),
-                    'vbr': int_or_none(media.get('bitrateVideo')),
+                    'format_id': ext,
+                    **traverse_obj(media, {
+                        'width': 'frameWidth',
+                        'height': 'frameHeight',
+                        'filesize': 'fileSize',
+                        'abr': 'bitrateAudio',
+                        'vbr': 'bitrateVideo'
+                    })
                 }
-                formats.append(f)
-        return formats
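
The **traverse_obj(media, {...}) expansion replaces the per-field media.get(...) / int_or_none(...) lookups. As used here, traverse_obj takes a dict that maps output keys to keys of the source object and, as far as I understand its defaults, leaves out entries whose lookup fails, so missing asset fields simply do not appear in the format dict instead of being set to None. A rough illustration with an invented asset dict (field names follow the diff above, values are made up):

from yt_dlp.utils import traverse_obj

media = {'frameWidth': 1280, 'frameHeight': 720, 'bitrateVideo': 1500}

fmt = {
    'url': 'https://example.com/video.mp4',
    'format_id': 'mp4',
    **traverse_obj(media, {
        'width': 'frameWidth',
        'height': 'frameHeight',
        'filesize': 'fileSize',    # not present in media, expected to be omitted
        'abr': 'bitrateAudio',     # not present in media, expected to be omitted
        'vbr': 'bitrateVideo',
    }),
}
print(fmt)
# Expected: {'url': 'https://example.com/video.mp4', 'format_id': 'mp4',
#            'width': 1280, 'height': 720, 'vbr': 1500}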