mirror of https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-02 17:22:31 +01:00
[mixcloud] Try preview server first, then further numbers
parent 7a757b7194
commit aae53774f2
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals

 import re
+import itertools

 from .common import InfoExtractor
 from ..compat import (
@@ -45,20 +46,19 @@ class MixcloudIE(InfoExtractor):
         },
     }]

-    def _get_url(self, track_id, template_url):
-        server_count = 30
-        for i in range(server_count):
-            url = template_url % i
+    def _get_url(self, track_id, template_url, server_number):
+        boundaries = (1, 30)
+        for nr in server_numbers(server_number, boundaries):
+            url = template_url % nr
             try:
                 # We only want to know if the request succeed
                 # don't download the whole file
                 self._request_webpage(
                     HEADRequest(url), track_id,
-                    'Checking URL %d/%d ...' % (i + 1, server_count + 1))
+                    'Checking URL %d/%d ...' % (nr, boundaries[-1]))
                 return url
             except ExtractorError:
                 pass
-
         return None

     def _real_extract(self, url):
@@ -72,12 +72,13 @@ def _real_extract(self, url):
         preview_url = self._search_regex(
             r'\s(?:data-preview-url|m-preview)="([^"]+)"', webpage, 'preview url')
         song_url = preview_url.replace('/previews/', '/c/originals/')
+        server_number = int(self._search_regex(r'stream(\d+)', song_url, 'server number'))
         template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)
-        final_song_url = self._get_url(track_id, template_url)
+        final_song_url = self._get_url(track_id, template_url, server_number)
         if final_song_url is None:
             self.to_screen('Trying with m4a extension')
             template_url = template_url.replace('.mp3', '.m4a').replace('originals/', 'm4a/64/')
-            final_song_url = self._get_url(track_id, template_url)
+            final_song_url = self._get_url(track_id, template_url, server_number)
         if final_song_url is None:
             raise ExtractorError('Unable to extract track url')

@@ -114,3 +115,35 @@ def _real_extract(self, url):
             'view_count': view_count,
             'like_count': like_count,
         }
+
+
+def server_numbers(first, boundaries):
+    """ Server numbers to try in descending order of probable availability.
+    Starting from first (i.e. the number of the server hosting the preview file)
+    and going further and further up to the higher boundary and down to the
+    lower one in an alternating fashion. Namely:
+
+        server_numbers(2, (1, 5))
+
+        # Where the preview server is 2, min number is 1 and max is 5.
+        # Yields: 2, 3, 1, 4, 5
+
+    Why not random numbers or increasing sequences? Since from what I've seen,
+    full length files seem to be hosted on servers whose number is closer to
+    that of the preview; to be confirmed.
+    """
+    zip_longest = getattr(itertools, 'zip_longest', None)
+    if zip_longest is None:
+        # python 2.x
+        zip_longest = itertools.izip_longest
+
+    if len(boundaries) != 2:
+        raise ValueError("boundaries should be a two-element tuple")
+    min, max = boundaries
+    highs = range(first + 1, max + 1)
+    lows = range(first - 1, min - 1, -1)
+    rest = filter(
+        None, itertools.chain.from_iterable(zip_longest(highs, lows)))
+    yield first
+    for n in rest:
+        yield n
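
To see how the pieces of this change fit together, here is a small standalone sketch (not part of the commit): it reproduces the ordering of the new server_numbers() helper (with the builtin-shadowing min/max names renamed) and runs a made-up preview URL through the same two regex steps that _real_extract now performs. The URL and host numbers are assumptions for illustration only.

# Minimal standalone sketch, not part of the commit. The preview URL below is
# made up; only the regex steps and the probing order mirror the diff above.
import itertools
import re


def server_numbers(first, boundaries):
    # Same ordering as the helper added above (builtin-shadowing names renamed):
    # yield the preview server first, then alternate outwards to both boundaries.
    zip_longest = getattr(itertools, 'zip_longest', None)
    if zip_longest is None:
        # python 2.x
        zip_longest = itertools.izip_longest

    if len(boundaries) != 2:
        raise ValueError("boundaries should be a two-element tuple")
    low, high = boundaries
    highs = range(first + 1, high + 1)
    lows = range(first - 1, low - 1, -1)
    rest = filter(
        None, itertools.chain.from_iterable(zip_longest(highs, lows)))
    yield first
    for n in rest:
        yield n


# Hypothetical preview URL in the shape the extractor expects.
preview_url = 'https://stream7.example.com/previews/some-track.mp3'
song_url = preview_url.replace('/previews/', '/c/originals/')

# The two regex steps _real_extract now performs (plain re instead of
# self._search_regex, which only exists inside an extractor).
server_number = int(re.search(r'stream(\d+)', song_url).group(1))  # 7
template_url = re.sub(r'(stream\d*)', 'stream%d', song_url)

print(list(server_numbers(server_number, (1, 30)))[:8])
# [7, 8, 6, 9, 5, 10, 4, 11]
print(template_url % 8)
# https://stream8.example.com/c/originals/some-track.mp3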