Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-02 17:22:31 +01:00)
Remove useless u prefixes

commit 8865bdeb37
parent 3aa578cad2
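Background (a minimal illustration, not part of the commit itself): in Python 3, string literals are Unicode by default, so the u prefix is a no-op and removing it does not change behaviour. The sample value below is made up for the example.

# Sketch: u'...' and '...' produce the same str type and value in Python 3.
prefixed = u'theplatform:%s' % '12345'  # hypothetical real_id value
plain = 'theplatform:%s' % '12345'

assert type(prefixed) is type(plain) is str
assert prefixed == plain == 'theplatform:12345'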
@@ -45,4 +45,4 @@ def _real_extract(self, url):
         real_id = self._search_regex(
             r"video\.settings\.pid\s*=\s*'([^']+)';",
             webpage, 'real video ID')
-        return self.url_result(u'theplatform:%s' % real_id)
+        return self.url_result('theplatform:%s' % real_id)
@@ -24,7 +24,7 @@ class ClipfishIE(InfoExtractor):
             'title': 'FIFA 14 - E3 2013 Trailer',
             'duration': 82,
         },
-        u'skip': 'Blocked in the US'
+        'skip': 'Blocked in the US'
     }

     def _real_extract(self, url):
@@ -34,7 +34,7 @@ def _real_extract(self, url):
         info_url = ('http://www.clipfish.de/devxml/videoinfo/%s?ts=%d' %
                     (video_id, int(time.time())))
         doc = self._download_xml(
-            info_url, video_id, note=u'Downloading info page')
+            info_url, video_id, note='Downloading info page')
         title = doc.find('title').text
         video_url = doc.find('filename').text
         if video_url is None:
@@ -125,7 +125,7 @@ def _real_extract(self, url):
         info = {
             'id': compat_str(track_data['id']),
             'url': track_data['track_file_stream_url'],
-            'title': track_data['performer'] + u' - ' + track_data['name'],
+            'title': track_data['performer'] + ' - ' + track_data['name'],
             'raw_title': track_data['name'],
             'uploader_id': data['user']['login'],
             'ext': 'm4a',
@@ -11,7 +11,7 @@ class GamekingsIE(InfoExtractor):
         'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
         # MD5 is flaky, seems to change regularly
         # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3',
-        u'info_dict': {
+        'info_dict': {
             'id': '20130811',
             'ext': 'mp4',
             'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
@@ -49,7 +49,7 @@ def _real_extract(self, url):
         page = self._download_webpage(url, video_id, 'Downloading page')

         if re.search(r'>Video Not Found or Deleted<', page) is not None:
-            raise ExtractorError(u'Video %s does not exist' % video_id, expected=True)
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)

         hash_key = self._html_search_regex(r'<input type="hidden" name="hash" value="([^"]+)">', page, 'hash')
         title = self._html_search_regex(r'(?m)<div class="blockTitle">\s*<h2>Watch ([^<]+)</h2>', page, 'title')
@@ -164,7 +164,7 @@ def _real_extract(self, url):
         if mgid is None or ':' not in mgid:
             mgid = self._search_regex(
                 [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
-                webpage, u'mgid')
+                webpage, 'mgid')
         return self._get_videos_info(mgid)


@@ -4,9 +4,11 @@
 import json

 from .common import InfoExtractor
-from ..utils import (
+from ..compat import (
     compat_urlparse,
     compat_urllib_parse,
+)
+from ..utils import (
     unified_strdate,
 )

@@ -122,7 +124,7 @@ def _real_extract(self, url):
         response = self._download_webpage(request_url, playlist_title)
         response = self._fix_json(response)
         if not response.strip():
-            self._downloader.report_warning(u'Got an empty reponse, trying '
+            self._downloader.report_warning('Got an empty reponse, trying '
                                             'adding the "newvideos" parameter')
             response = self._download_webpage(request_url + '&newvideos=true',
                                               playlist_title)
@@ -32,7 +32,7 @@ def _fetch_data(vid_id, mytv=False):
             data_url = base_data_url + str(vid_id)
             data_json = self._download_webpage(
                 data_url, video_id,
-                note=u'Downloading JSON data for ' + str(vid_id))
+                note='Downloading JSON data for ' + str(vid_id))
             return json.loads(data_json)

         mobj = re.match(self._VALID_URL, url)
@@ -53,7 +53,7 @@ def _fetch_data(vid_id, mytv=False):
                    for q in QUALITIES
                    if data['data'][q + 'Vid'] != 0]
         if not vid_ids:
-            raise ExtractorError(u'No formats available for this video')
+            raise ExtractorError('No formats available for this video')

         # For now, we just pick the highest available quality
         vid_id = vid_ids[-1]
@@ -71,7 +71,7 @@ def _fetch_data(vid_id, mytv=False):
                         (allot, prot, clipsURL[i], su[i]))
             part_str = self._download_webpage(
                 part_url, video_id,
-                note=u'Downloading part %d of %d' % (i + 1, part_count))
+                note='Downloading part %d of %d' % (i + 1, part_count))

             part_info = part_str.split('|')
             video_url = '%s%s?key=%s' % (part_info[0], su[i], part_info[3])
@@ -33,5 +33,6 @@ def _real_extract(self, url):
         # Other videos works fine with the info from the object
         brightcove_url = BrightcoveIE._extract_brightcove_url(webpage)
         if brightcove_url is None:
-            raise ExtractorError(u'The webpage does not contain a video', expected=True)
+            raise ExtractorError(
+                'The webpage does not contain a video', expected=True)
         return self.url_result(brightcove_url, BrightcoveIE.ie_key())
@@ -73,7 +73,7 @@ def _real_extract(self, url):
         result = []
         len_parts = len(parts)
         if len_parts > 1:
-            self.to_screen(u'%s: found %s parts' % (video_id, len_parts))
+            self.to_screen('%s: found %s parts' % (video_id, len_parts))
         for part in parts:
             part_id = part['k']
             final_url = self._url_for_id(part_id, quality)
@@ -49,7 +49,7 @@ def _real_extract(self, url):
         try:
             params = json.loads(json_params)
         except:
-            raise ExtractorError(u'Invalid JSON')
+            raise ExtractorError('Invalid JSON')

         self.report_extraction(video_id)
         try:
@@ -103,7 +103,7 @@ def _real_extract(self, url):
         self._sort_formats(formats)

         if not formats:
-            raise ExtractorError(u'ERROR: no known formats available for video')
+            raise ExtractorError('ERROR: no known formats available for video')

         return {
             'id': video_id,