# coding: utf-8
from __future__ import unicode_literals
import json
import time
import hmac
import hashlib
import itertools
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
)
from ..compat import compat_urllib_request
from .common import InfoExtractor
class VikiBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
_API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
_API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'
_APP = '65535a'
_APP_VERSION = '2.2.5.1428709186'
_APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)'
_NETRC_MACHINE = 'viki'
_token = None
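
    # Every API request is signed client-side: the query string (path, app id,
    # timestamp and, once logged in, the session token) is HMAC-SHA1'd with
    # _APP_SECRET and appended as the `sig` parameter.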
def _prepare_call(self, path, timestamp=None, post_data=None):
path += '?' if '?' not in path else '&'
if not timestamp:
timestamp = int(time.time())
query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp)
if self._token:
query += '&token=%s' % self._token
sig = hmac.new(
self._APP_SECRET.encode('ascii'),
query.encode('ascii'),
hashlib.sha1
).hexdigest()
url = self._API_URL_TEMPLATE % (query, sig)
return compat_urllib_request.Request(
url, json.dumps(post_data).encode('utf-8')) if post_data else url
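
    # Thin wrapper around _download_json: if the API answers with an
    # "invalid timestamp" error, the call is retried once using the
    # server-supplied current_timestamp; any other error is raised.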
def _call_api(self, path, video_id, note, timestamp=None, post_data=None):
resp = self._download_json(
self._prepare_call(path, timestamp, post_data), video_id, note)
error = resp.get('error')
if error:
if error == 'invalid timestamp':
resp = self._download_json(
self._prepare_call(path, int(resp['current_timestamp']), post_data),
video_id, '%s (retry)' % note)
error = resp.get('error')
if error:
self._raise_error(resp['error'])
return resp
def _raise_error(self, error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error),
expected=True)
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'login_id': username,
'password': password,
}
login = self._call_api(
'sessions.json', None,
'Logging in as %s' % username, post_data=login_form)
self._token = login.get('token')
if not self._token:
self.report_warning('Unable to get session token, login has probably failed')
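

# Single-video extractor for /videos/<id>v and /player/<id>v pages; all
# metadata, subtitle and stream information comes from the signed API.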
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
_VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# clip
'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
'info_dict': {
'id': '1067139v',
'ext': 'mp4',
'title': "'The Avengers: Age of Ultron' Press Conference",
'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
'duration': 352,
'timestamp': 1430380829,
'upload_date': '20150430',
'uploader': 'Arirang TV',
'like_count': int,
'age_limit': 0,
}
}, {
'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
'info_dict': {
'id': '1048879v',
'ext': 'mp4',
'title': 'Ankhon Dekhi',
'duration': 6512,
'timestamp': 1408532356,
'upload_date': '20140820',
'uploader': 'Spuul',
'like_count': int,
'age_limit': 13,
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# episode
'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
'md5': '190f3ef426005ba3a080a63325955bc3',
'info_dict': {
'id': '44699v',
'ext': 'mp4',
'title': 'Boys Over Flowers - Episode 1',
'description': 'md5:52617e4f729c7d03bfd4bcbbb6e946f2',
'duration': 4155,
'timestamp': 1270496524,
'upload_date': '20100405',
'uploader': 'group8',
'like_count': int,
'age_limit': 13,
}
}, {
# youtube external
'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
'md5': '216d1afdc0c64d1febc1e9f2bd4b864b',
'info_dict': {
'id': '50562v',
'ext': 'mp4',
'title': 'Poor Nastya [COMPLETE] - Episode 1',
'description': '',
'duration': 607,
'timestamp': 1274949505,
'upload_date': '20101213',
'uploader': 'ad14065n',
'uploader_id': 'ad14065n',
'like_count': int,
'age_limit': 13,
}
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
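
        # Prefer the English title, then the first available translation;
        # otherwise synthesize one from the episode number or the video id,
        # prefixed with the container (series) title when present.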
title = None
titles = video.get('titles')
if titles:
            title = titles.get('en') or list(titles.values())[0]
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
container_titles = video.get('container', {}).get('titles')
if container_titles:
                container_title = container_titles.get('en') or list(container_titles.values())[0]
title = '%s - %s' % (container_title, title)
descriptions = video.get('descriptions')
        description = descriptions.get('en') or list(descriptions.values())[0] if descriptions else None
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
uploader = video.get('author')
like_count = int_or_none(video.get('likes', {}).get('count'))
age_limit = parse_age_limit(video.get('rating'))
thumbnails = []
for thumbnail_id, thumbnail in video.get('images', {}).items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail.get('url'),
})
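
        # Subtitles are listed per language; point each entry at the signed
        # subtitle endpoint in both srt and vtt flavours.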
subtitles = {}
for subtitle_lang, _ in video.get('subtitle_completions', {}).items():
subtitles[subtitle_lang] = [{
'ext': subtitles_format,
'url': self._prepare_call(
'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)),
} for subtitles_format in ('srt', 'vtt')]
result = {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'like_count': like_count,
'age_limit': age_limit,
'thumbnails': thumbnails,
'subtitles': subtitles,
}
streams = self._call_api(
'videos/%s/streams.json' % video_id, video_id,
'Downloading video streams JSON')
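
        # Videos hosted off-site (e.g. on YouTube, see the test cases above)
        # only expose an "external" stream; hand those off to the matching
        # extractor via a url_transparent result.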
if 'external' in streams:
result.update({
'_type': 'url_transparent',
'url': streams['external']['url'],
})
return result
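
        # Otherwise build the format list: stream keys are either a
        # resolution label such as "720p" or "m3u8", each mapping protocols
        # to their URLs.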
formats = []
for format_id, stream_dict in streams.items():
            height = int_or_none(self._search_regex(
                r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
if format_id == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_dict['url'], video_id, 'mp4', m3u8_id='m3u8-%s' % protocol))
else:
formats.append({
'url': format_dict['url'],
'format_id': '%s-%s' % (format_id, protocol),
'height': height,
})
self._sort_formats(formats)
result['formats'] = formats
return result
class VikiChannelIE(VikiBaseIE):
IE_NAME = 'viki:channel'
_VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
'info_dict': {
'id': '50c',
'title': 'Boys Over Flowers',
'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
},
'playlist_count': 70,
}, {
'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
'info_dict': {
'id': '1354c',
'title': 'Poor Nastya [COMPLETE]',
'description': 'md5:05bf5471385aa8b21c18ad450e350525',
},
'playlist_count': 127,
}, {
'url': 'http://www.viki.com/news/24569c-showbiz-korea',
'only_matching': True,
}, {
'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
'only_matching': True,
}, {
'url': 'http://www.viki.com/artists/2141c-shinee',
'only_matching': True,
}]
_PER_PAGE = 25
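
    # Episodes, clips and movies are paged through 25 entries at a time
    # until the API reports no next page.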
def _real_extract(self, url):
channel_id = self._match_id(url)
channel = self._call_api(
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
titles = channel['titles']
        title = titles.get('en') or list(titles.values())[0]
descriptions = channel['descriptions']
        description = descriptions.get('en') or list(descriptions.values())[0]
entries = []
for video_type in ('episodes', 'clips', 'movies'):
for page_num in itertools.count(1):
page = self._call_api(
'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d'
% (channel_id, video_type, self._PER_PAGE, page_num), channel_id,
'Downloading %s JSON page #%d' % (video_type, page_num))
for video in page['response']:
video_id = video['id']
entries.append(self.url_result(
'http://www.viki.com/videos/%s' % video_id, 'Viki'))
if not page['pagination']['next']:
break
return self.playlist_result(entries, channel_id, title, description)