Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-20 01:42:50 +01:00)
[ie/francetv] Fix extractors (#9333)
Closes #9323
Authored by: bashonly

parent 413d367580
commit 9749ac7fec
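
Note (added for context, not part of the commit): a minimal sketch of the URL-smuggling round trip this fix relies on. smuggle_url and unsmuggle_url are yt-dlp's real helpers from yt_dlp.utils; the page URL and video id below are made-up placeholders.

# Sketch only: shows how a hostname smuggled into a francetv: URL can be read back,
# which the diff below uses to pass the originating site as the player API's
# 'domain' query parameter.
import urllib.parse

from yt_dlp.utils import smuggle_url, unsmuggle_url

page_url = 'https://www.france.tv/some-show/12345-some-episode.html'  # hypothetical page URL

# The site extractor smuggles the page's hostname into the internal francetv: URL ...
smuggled = smuggle_url(
    'francetv:some-video-id',  # hypothetical video id
    {'hostname': urllib.parse.urlparse(page_url).hostname})

# ... and FranceTVIE._real_extract unsmuggles it again before querying the player API.
url, data = unsmuggle_url(smuggled, {})
print(url, data['hostname'])  # -> francetv:some-video-id www.france.tv
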
yt_dlp/extractor/francetv.py
@@ -1,21 +1,31 @@
+import urllib.parse
+
 from .common import InfoExtractor
 from .dailymotion import DailymotionIE
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
     determine_ext,
+    filter_dict,
     format_field,
     int_or_none,
     join_nonempty,
     parse_iso8601,
     parse_qs,
+    smuggle_url,
+    unsmuggle_url,
+    url_or_none,
 )
+from ..utils.traversal import traverse_obj


 class FranceTVBaseInfoExtractor(InfoExtractor):
-    def _make_url_result(self, video_or_full_id, catalog=None):
+    def _make_url_result(self, video_or_full_id, catalog=None, url=None):
         full_id = 'francetv:%s' % video_or_full_id
         if '@' not in video_or_full_id and catalog:
             full_id += '@%s' % catalog
+        if url:
+            full_id = smuggle_url(full_id, {'hostname': urllib.parse.urlparse(url).hostname})
         return self.url_result(
             full_id, ie=FranceTVIE.ie_key(),
             video_id=video_or_full_id.split('@')[0])
@@ -35,6 +45,8 @@ class FranceTVIE(InfoExtractor):
                    )
                    '''
     _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1']
+    _GEO_COUNTRIES = ['FR']
+    _GEO_BYPASS = False

     _TESTS = [{
         # without catalog
@@ -76,10 +88,8 @@ class FranceTVIE(InfoExtractor):
         'only_matching': True,
     }]

-    def _extract_video(self, video_id, catalogue=None):
-        # Videos are identified by idDiffusion so catalogue part is optional.
-        # However when provided, some extra formats may be returned so we pass
-        # it if available.
+    def _extract_video(self, video_id, catalogue=None, hostname=None):
+        # TODO: Investigate/remove 'catalogue'/'catalog'; it has not been used since 2021
         is_live = None
         videos = []
         title = None
@@ -94,15 +104,16 @@ def _extract_video(self, video_id, catalogue=None):
         for device_type in ('desktop', 'mobile'):
             dinfo = self._download_json(
                 'https://player.webservices.francetelevisions.fr/v1/videos/%s' % video_id,
-                video_id, 'Downloading %s video JSON' % device_type, query={
+                video_id, f'Downloading {device_type} video JSON', query=filter_dict({
                     'device_type': device_type,
                     'browser': 'chrome',
-                }, fatal=False)
+                    'domain': hostname,
+                }), fatal=False)

             if not dinfo:
                 continue

-            video = dinfo.get('video')
+            video = traverse_obj(dinfo, ('video', {dict}))
             if video:
                 videos.append(video)
                 if duration is None:
@@ -112,7 +123,7 @@ def _extract_video(self, video_id, catalogue=None):
                 if spritesheets is None:
                     spritesheets = video.get('spritesheets')

-            meta = dinfo.get('meta')
+            meta = traverse_obj(dinfo, ('meta', {dict}))
             if meta:
                 if title is None:
                     title = meta.get('title')
@@ -126,22 +137,21 @@ def _extract_video(self, video_id, catalogue=None):
                 if timestamp is None:
                     timestamp = parse_iso8601(meta.get('broadcasted_at'))

-        formats = []
-        subtitles = {}
-        for video in videos:
+        formats, subtitles, video_url = [], {}, None
+        for video in traverse_obj(videos, lambda _, v: url_or_none(v['url'])):
+            video_url = video['url']
             format_id = video.get('format')

-            video_url = None
-            if video.get('workflow') == 'token-akamai':
-                token_url = video.get('token')
-                if token_url:
-                    token_json = self._download_json(
-                        token_url, video_id,
-                        'Downloading signed %s manifest URL' % format_id)
-                    if token_json:
-                        video_url = token_json.get('url')
-            if not video_url:
-                video_url = video.get('url')
+            token_url = url_or_none(video.get('token'))
+            if token_url and video.get('workflow') == 'token-akamai':
+                tokenized_url = traverse_obj(self._download_json(
+                    token_url, video_id, f'Downloading signed {format_id} manifest URL',
+                    fatal=False, query={
+                        'format': 'json',
+                        'url': video_url,
+                    }), ('url', {url_or_none}))
+                if tokenized_url:
+                    video_url = tokenized_url

             ext = determine_ext(video_url)
             if ext == 'f4m':
@@ -174,6 +184,13 @@ def _extract_video(self, video_id, catalogue=None):

             # XXX: what is video['captions']?

+        if not formats and video_url:
+            urlh = self._request_webpage(
+                HEADRequest(video_url), video_id, 'Checking for geo-restriction',
+                fatal=False, expected_status=403)
+            if urlh and urlh.headers.get('x-errortype') == 'geo':
+                self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
+
         for f in formats:
             if f.get('acodec') != 'none' and f.get('language') in ('qtz', 'qad'):
                 f['language_preference'] = -10
@@ -213,6 +230,7 @@ def _extract_video(self, video_id, catalogue=None):
         }

     def _real_extract(self, url):
+        url, smuggled_data = unsmuggle_url(url, {})
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         catalog = mobj.group('catalog')
@@ -224,7 +242,7 @@ def _real_extract(self, url):
         if not video_id:
             raise ExtractorError('Invalid URL', expected=True)

-        return self._extract_video(video_id, catalog)
+        return self._extract_video(video_id, catalog, hostname=smuggled_data.get('hostname'))


 class FranceTVSiteIE(FranceTVBaseInfoExtractor):
@@ -314,7 +332,7 @@ def _real_extract(self, url):
                 r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
                 webpage, 'video ID').split('@')

-        return self._make_url_result(video_id, catalogue)
+        return self._make_url_result(video_id, catalogue, url=url)


 class FranceTVInfoIE(FranceTVBaseInfoExtractor):
@@ -405,4 +423,4 @@ def _real_extract(self, url):
             r'(?:data-id|<figure[^<]+\bid)=["\']([\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'),
             webpage, 'video id')

-        return self._make_url_result(video_id)
+        return self._make_url_result(video_id, url=url)
yt_dlp/extractor/lumni.py
@@ -1,8 +1,7 @@
-from .common import InfoExtractor
-from .francetv import FranceTVIE
+from .francetv import FranceTVBaseInfoExtractor


-class LumniIE(InfoExtractor):
+class LumniIE(FranceTVBaseInfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?lumni\.fr/video/(?P<id>[\w-]+)'
     _TESTS = [{
         'url': 'https://www.lumni.fr/video/l-homme-et-son-environnement-dans-la-revolution-industrielle',
@@ -21,4 +20,4 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, display_id)
         video_id = self._html_search_regex(
             r'<div[^>]+data-factoryid\s*=\s*["\']([^"\']+)', webpage, 'video id')
-        return self.url_result(f'francetv:{video_id}', FranceTVIE, video_id)
+        return self._make_url_result(video_id, url=url)