mirror of https://github.com/yt-dlp/yt-dlp.git synced 2024-11-02 17:22:31 +01:00

[Hidive] Fix duplicate and incorrect formats

pukkandan 2021-10-06 10:53:22 +05:30
parent 49e7e9c3ce
commit 705e7c2005
No known key found for this signature in database
GPG Key ID: 0F00D95A001F4698
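
Summary of the change: the old _real_extract scraped data-video attributes from the watch page and posted each one to /play/settings, which produced duplicate and incorrectly labelled formats; the rewritten version posts to /play/settings once and walks settings['renditions'], skipping any HLS or caption URL it has already seen. Below is a minimal standalone sketch of that seen-URL pattern (the rendition keys and URLs are invented for illustration; the real logic is in the diff of the HiDive extractor that follows):

# Standalone sketch of the dedup pattern introduced by this commit.
# The rendition keys and URLs below are made up for illustration only.
renditions = {
    'ja_hd_tv': {'bitrates': {'hls': 'https://example.com/master.m3u8'}},
    'en_hd_tv': {'bitrates': {'hls': 'https://example.com/master.m3u8'}},  # duplicate playlist
    'ja_hd_home': {'bitrates': {}},                                        # no HLS URL at all
}

urls = {None}  # priming the set with None also filters out entries with a missing URL
formats = []
for rendition_id, rendition in renditions.items():
    m3u8_url = rendition.get('bitrates', {}).get('hls')
    if m3u8_url not in urls:  # only the first occurrence of each playlist URL is kept
        urls.add(m3u8_url)
        formats.append({'format_id': rendition_id, 'url': m3u8_url})

print(formats)  # -> only the 'ja_hd_tv' entry survives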


@@ -1,8 +1,6 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
@@ -14,7 +12,7 @@
 
 
 class HiDiveIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<title>[^/]+)/(?P<key>[^/?#&]+)'
+    _VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<id>(?P<title>[^/]+)/(?P<key>[^/?#&]+))'
     # Using X-Forwarded-For results in 403 HTTP error for HLS fragments,
     # so disabling geo bypass completely
     _GEO_BYPASS = False
@@ -55,68 +53,53 @@ def _real_initialize(self):
             self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
 
     def _real_extract(self, url):
-        mobj = self._match_valid_url(url)
-        title, key = mobj.group('title', 'key')
-        video_id = '%s/%s' % (title, key)
-        webpage = self._download_webpage(url, video_id, fatal=False)
-        data_videos = re.findall(r'data-video=\"([^\"]+)\"\s?data-captions=\"([^\"]+)\"', webpage)
-        formats = []
-        subtitles = {}
-        for data_video in data_videos:
-            _, _, _, version, audio, _, extra = data_video[0].split('_')
-            caption = data_video[1]
-
-            settings = self._download_json(
-                'https://www.hidive.com/play/settings', video_id,
-                data=urlencode_postdata({
-                    'Title': title,
-                    'Key': key,
-                    'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
-                    'Version': version,
-                    'Audio': audio,
-                    'Captions': caption,
-                    'Extra': extra,
-                }))
-
-            restriction = settings.get('restrictionReason')
-            if restriction == 'RegionRestricted':
-                self.raise_geo_restricted()
-
-            if restriction and restriction != 'None':
-                raise ExtractorError(
-                    '%s said: %s' % (self.IE_NAME, restriction), expected=True)
-
-            for rendition_id, rendition in settings['renditions'].items():
-                m3u8_url = url_or_none(try_get(rendition, lambda x: x['bitrates']['hls']))
-                if not m3u8_url:
-                    continue
-                frmt = self._extract_m3u8_formats(
-                    m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
-                    m3u8_id='%s-%s-%s-%s' % (version, audio, extra, caption), fatal=False)
-                for f in frmt:
-                    f['language'] = audio
-                formats.extend(frmt)
-
-                for cc_file in rendition.get('ccFiles', []):
-                    cc_url = url_or_none(try_get(cc_file, lambda x: x[2]))
-                    # name is used since we cant distinguish subs with same language code
-                    cc_lang = try_get(cc_file, (lambda x: x[1].replace(' ', '-').lower(), lambda x: x[0]), str)
-                    if cc_url and cc_lang:
-                        subtitles.setdefault(cc_lang, []).append({'url': cc_url})
-        self._sort_formats(formats)
-
-        season_number = int_or_none(self._search_regex(
-            r's(\d+)', key, 'season number', default=None))
-        episode_number = int_or_none(self._search_regex(
-            r'e(\d+)', key, 'episode number', default=None))
-
+        video_id, title, key = self._match_valid_url(url).group('id', 'title', 'key')
+        settings = self._download_json(
+            'https://www.hidive.com/play/settings', video_id,
+            data=urlencode_postdata({
+                'Title': title,
+                'Key': key,
+                'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
+            }))
+
+        restriction = settings.get('restrictionReason')
+        if restriction == 'RegionRestricted':
+            self.raise_geo_restricted()
+        if restriction and restriction != 'None':
+            raise ExtractorError(
+                '%s said: %s' % (self.IE_NAME, restriction), expected=True)
+
+        formats, subtitles, urls = [], {}, {None}
+        for rendition_id, rendition in settings['renditions'].items():
+            audio, version, extra = rendition_id.split('_')
+            m3u8_url = url_or_none(try_get(rendition, lambda x: x['bitrates']['hls']))
+            if m3u8_url not in urls:
+                urls.add(m3u8_url)
+                frmt = self._extract_m3u8_formats(
+                    m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', m3u8_id=rendition_id, fatal=False)
+                for f in frmt:
+                    f['language'] = audio
+                    f['format_note'] = f'{version}, {extra}'
+                formats.extend(frmt)
+
+            for cc_file in rendition.get('ccFiles', []):
+                cc_url = url_or_none(try_get(cc_file, lambda x: x[2]))
+                # name is used since we cant distinguish subs with same language code
+                cc_lang = try_get(cc_file, (lambda x: x[1].replace(' ', '-').lower(), lambda x: x[0]), str)
+                if cc_url not in urls and cc_lang:
+                    urls.add(cc_url)
+                    subtitles.setdefault(cc_lang, []).append({'url': cc_url})
+
+        self._sort_formats(formats)
         return {
             'id': video_id,
             'title': video_id,
             'subtitles': subtitles,
             'formats': formats,
             'series': title,
-            'season_number': season_number,
-            'episode_number': episode_number,
+            'season_number': int_or_none(
+                self._search_regex(r's(\d+)', key, 'season number', default=None)),
+            'episode_number': int_or_none(
+                self._search_regex(r'e(\d+)', key, 'episode number', default=None)),
             'http_headers': {'Referer': url}
         }
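
For reference, a quick standalone check of what the reworked _VALID_URL and the season/episode regexes from the diff extract (the stream URL below is a made-up example, not a real HIDIVE title):

import re

_VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<id>(?P<title>[^/]+)/(?P<key>[^/?#&]+))'

# Hypothetical URL used only to illustrate the named groups.
mobj = re.match(_VALID_URL, 'https://www.hidive.com/stream/some-show/s01e001')
video_id, title, key = mobj.group('id', 'title', 'key')
# video_id == 'some-show/s01e001'  (what the old code built by hand as '%s/%s' % (title, key))
# title    == 'some-show', key == 's01e001'

season = re.search(r's(\d+)', key)    # -> '01'
episode = re.search(r'e(\d+)', key)   # -> '001'
print(video_id, season.group(1), episode.group(1))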