
[libsyn] improve extraction(closes #20229)

Author: Remita Amine
Date: 2019-03-03 06:18:15 +01:00
Commit: 0a5baf9c21
Parent: 8ae113ca9d


youtube_dl/extractor/libsyn.py

@@ -1,12 +1,14 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import json
 import re
 
 from .common import InfoExtractor
 from ..utils import (
+    clean_html,
+    get_element_by_class,
     parse_duration,
+    strip_or_none,
     unified_strdate,
 )
@@ -21,7 +23,9 @@ class LibsynIE(InfoExtractor):
             'id': '6385796',
             'ext': 'mp3',
             'title': "Champion Minded - Developing a Growth Mindset",
-            'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
+            # description fetched using another request:
+            # http://html5-player.libsyn.com/embed/getitemdetails?item_id=6385796
+            # 'description': 'In this episode, Allistair talks about the importance of developing a growth mindset, not only in sports, but in life too.',
             'upload_date': '20180320',
             'thumbnail': 're:^https?://.*',
         },
@@ -38,22 +42,36 @@ class LibsynIE(InfoExtractor):
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
-        url = m.group('mainurl')
+        url, video_id = re.match(self._VALID_URL, url).groups()
         webpage = self._download_webpage(url, video_id)
 
-        podcast_title = self._search_regex(
-            r'<h3>([^<]+)</h3>', webpage, 'podcast title', default=None)
-        if podcast_title:
-            podcast_title = podcast_title.strip()
-        episode_title = self._search_regex(
-            r'(?:<div class="episode-title">|<h4>)([^<]+)</', webpage, 'episode title')
-        if episode_title:
-            episode_title = episode_title.strip()
+        data = self._parse_json(self._search_regex(
+            r'var\s+playlistItem\s*=\s*({.+?});',
+            webpage, 'JSON data block'), video_id)
+
+        episode_title = data.get('item_title') or get_element_by_class('episode-title', webpage)
+        if not episode_title:
+            self._search_regex(
+                [r'data-title="([^"]+)"', r'<title>(.+?)</title>'],
+                webpage, 'episode title')
+        episode_title = episode_title.strip()
+
+        podcast_title = strip_or_none(clean_html(self._search_regex(
+            r'<h3>([^<]+)</h3>', webpage, 'podcast title',
+            default=None) or get_element_by_class('podcast-title', webpage)))
 
         title = '%s - %s' % (podcast_title, episode_title) if podcast_title else episode_title
 
+        formats = []
+        for k, format_id in (('media_url_libsyn', 'libsyn'), ('media_url', 'main'), ('download_link', 'download')):
+            f_url = data.get(k)
+            if not f_url:
+                continue
+            formats.append({
+                'url': f_url,
+                'format_id': format_id,
+            })
+
         description = self._html_search_regex(
             r'<p\s+id="info_text_body">(.+?)</p>', webpage,
             'description', default=None)
@@ -61,27 +79,15 @@ def _real_extract(self, url):
             # Strip non-breaking and normal spaces
             description = description.replace('\u00A0', ' ').strip()
 
         release_date = unified_strdate(self._search_regex(
-            r'<div class="release_date">Released: ([^<]+)<', webpage, 'release date', fatal=False))
-
-        data_json = self._search_regex(r'var\s+playlistItem\s*=\s*(\{.*?\});\n', webpage, 'JSON data block')
-        data = json.loads(data_json)
-
-        formats = [{
-            'url': data['media_url'],
-            'format_id': 'main',
-        }, {
-            'url': data['media_url_libsyn'],
-            'format_id': 'libsyn',
-        }]
-
-        thumbnail = data.get('thumbnail_url')
-        duration = parse_duration(data.get('duration'))
+            r'<div class="release_date">Released: ([^<]+)<',
+            webpage, 'release date', default=None) or data.get('release_date'))
 
         return {
             'id': video_id,
             'title': title,
             'description': description,
-            'thumbnail': thumbnail,
+            'thumbnail': data.get('thumbnail_url'),
             'upload_date': release_date,
-            'duration': duration,
+            'duration': parse_duration(data.get('duration')),
             'formats': formats,
         }
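
The rewritten extractor takes everything from the playlistItem JSON object embedded in the player page and only falls back to HTML scraping for titles. Below is a minimal standalone sketch of that approach, outside youtube-dl's InfoExtractor framework: the playlistItem regex and the JSON keys mirror the diff above, while the embed URL and the plain urllib/json/re plumbing are illustrative assumptions, not part of the commit.

# A minimal sketch of the playlistItem-based extraction, assuming a Libsyn
# embed page URL (the URL in the usage example below is illustrative).
# The regex and the JSON keys ('item_title', 'media_url', 'media_url_libsyn',
# 'download_link', 'thumbnail_url', 'duration', 'release_date') mirror the
# ones used in the commit; urllib replaces _download_webpage/_parse_json.
import json
import re
import urllib.request


def extract_libsyn_item(embed_url):
    webpage = urllib.request.urlopen(embed_url).read().decode('utf-8')

    # The embed page defines: var playlistItem = {...};
    match = re.search(r'var\s+playlistItem\s*=\s*({.+?});', webpage)
    if not match:
        raise ValueError('playlistItem JSON block not found')
    data = json.loads(match.group(1))

    # Collect whichever media URLs are present, in the same order as the diff.
    formats = []
    for key, format_id in (('media_url_libsyn', 'libsyn'),
                           ('media_url', 'main'),
                           ('download_link', 'download')):
        media_url = data.get(key)
        if media_url:
            formats.append({'url': media_url, 'format_id': format_id})

    return {
        'title': data.get('item_title'),
        'thumbnail': data.get('thumbnail_url'),
        'duration': data.get('duration'),          # raw value as found in the JSON
        'release_date': data.get('release_date'),
        'formats': formats,
    }


if __name__ == '__main__':
    # Hypothetical usage; any html5-player.libsyn.com embed episode URL works.
    info = extract_libsyn_item(
        'https://html5-player.libsyn.com/embed/episode/id/6385796/')
    print(info['title'], [f['format_id'] for f in info['formats']])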