Mirror of https://github.com/yt-dlp/yt-dlp.git, synced 2024-11-20 01:42:50 +01:00
[compat] Remove more functions
Removing any more will require changes to a large number of extractors
commit ac66811112
parent 3c5386cd71
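
The change is mechanical throughout: each call site swaps a removed compat_* alias for the standard-library name it wrapped. A minimal sketch of the mappings exercised in the hunks below (illustrative code only, not taken from the commit; the variable names are invented):

    # Illustrative only: stdlib equivalents of the compat shims removed here.
    import getpass
    import http.server
    import os
    import shutil
    import struct
    import urllib.error
    import urllib.parse
    import urllib.request

    os.environ['SOME_VAR'] = 'value'                      # compat_setenv('SOME_VAR', 'value')
    cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')  # compat_getenv(...)
    width = shutil.get_terminal_size().columns            # compat_get_terminal_size().columns
    (byte,) = struct.unpack('!B', b'\x00')                # compat_struct_unpack('!B', b'\x00')
    handler_cls = http.server.BaseHTTPRequestHandler      # compat_http_server.BaseHTTPRequestHandler
    text = urllib.parse.unquote_plus('abc%20def')         # compat_urllib_parse_unquote_plus(...)
    opener = urllib.request.build_opener()                # compat_urllib_request.build_opener()
    error_cls = urllib.error.URLError                     # compat_urllib_error.URLError
    prompt = getpass.getpass                              # compat_getpass
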
@@ -13,9 +13,11 @@
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import gettestcases
 
-from yt_dlp.utils import compat_urllib_parse_urlparse, compat_urllib_request
+import urllib.request
+
+from test.helper import gettestcases
+from yt_dlp.utils import compat_urllib_parse_urlparse
 
 if len(sys.argv) > 1:
     METHOD = 'LIST'
@@ -26,7 +28,7 @@
 for test in gettestcases():
     if METHOD == 'EURISTIC':
         try:
-            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
+            webpage = urllib.request.urlopen(test['url'], timeout=10).read()
         except Exception:
             print('\nFail: {}'.format(test['name']))
             continue

@@ -1,12 +1,15 @@
 #!/usr/bin/env python3
-import json
+
+# Allow direct execution
 import os
-import re
 import sys
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from yt_dlp.compat import compat_urllib_request
+
+import json
+import re
+import urllib.request
 
 # usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
 # version can be either 0-aligned (yt-dlp version) or normalized (PyPI version)
@@ -15,7 +18,7 @@
 
 normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
 
-pypi_release = json.loads(compat_urllib_request.urlopen(
+pypi_release = json.loads(urllib.request.urlopen(
     'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
 ).read().decode())
 

@@ -6,10 +6,12 @@
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import threading
-from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
+import http.server
+import threading
 
-from yt_dlp.compat import compat_etree_fromstring, compat_http_server
+from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
+
+from yt_dlp.compat import compat_etree_fromstring
 from yt_dlp.extractor import YoutubeIE, get_info_extractor
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.utils import (
@@ -23,7 +25,7 @@
 TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
 
 
-class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
 
@@ -1655,7 +1657,7 @@ def test_response_with_expected_status_returns_content(self):
         # or the underlying `_download_webpage_handle` returning no content
         # when a response matches `expected_status`.
 
-        httpd = compat_http_server.HTTPServer(
+        httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), InfoExtractorTestRequestHandler)
         port = http_server_port(httpd)
         server_thread = threading.Thread(target=httpd.serve_forever)

@@ -8,15 +8,11 @@
 
 import copy
 import json
-from test.helper import FakeYDL, assertRegexpMatches
+import urllib.error
 
+from test.helper import FakeYDL, assertRegexpMatches
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import (
-    compat_os_name,
-    compat_setenv,
-    compat_str,
-    compat_urllib_error,
-)
+from yt_dlp.compat import compat_os_name, compat_str
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.postprocessor.common import PostProcessor
@@ -841,14 +837,14 @@ def gen():
         # test('%(foo|)s', ('', '_'))  # fixme
 
         # Environment variable expansion for prepare_filename
-        compat_setenv('__yt_dlp_var', 'expanded')
+        os.environ['__yt_dlp_var'] = 'expanded'
         envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
         test(envvar, (envvar, 'expanded'))
         if compat_os_name == 'nt':
             test('%s%', ('%s%', '%s%'))
-            compat_setenv('s', 'expanded')
+            os.environ['s'] = 'expanded'
             test('%s%', ('%s%', 'expanded'))  # %s% should be expanded before escaping %s
-            compat_setenv('(test)s', 'expanded')
+            os.environ['(test)s'] = 'expanded'
             test('%(test)s%', ('NA%', 'expanded'))  # Environment should take priority over template
 
         # Path expansion and escaping
@@ -1101,7 +1097,7 @@ def test_selection(params, expected_ids, evaluate_all=False):
     def test_urlopen_no_file_protocol(self):
         # see https://github.com/ytdl-org/youtube-dl/issues/8227
         ydl = YDL()
-        self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
+        self.assertRaises(urllib.error.URLError, ydl.urlopen, 'file:///etc/passwd')
 
     def test_do_not_override_ie_key_in_url_transparent(self):
         ydl = YDL()

@@ -7,16 +7,15 @@
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
+import struct
+import urllib.parse
+
 from yt_dlp import compat
 from yt_dlp.compat import (
     compat_etree_fromstring,
     compat_expanduser,
-    compat_getenv,
-    compat_setenv,
     compat_str,
-    compat_struct_unpack,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
 )
 
@@ -31,26 +30,14 @@ def test_compat_passthrough(self):
 
         compat.asyncio.events  # Must not raise error
 
-    def test_compat_getenv(self):
-        test_str = 'тест'
-        compat_setenv('yt_dlp_COMPAT_GETENV', test_str)
-        self.assertEqual(compat_getenv('yt_dlp_COMPAT_GETENV'), test_str)
-
-    def test_compat_setenv(self):
-        test_var = 'yt_dlp_COMPAT_SETENV'
-        test_str = 'тест'
-        compat_setenv(test_var, test_str)
-        compat_getenv(test_var)
-        self.assertEqual(compat_getenv(test_var), test_str)
-
     def test_compat_expanduser(self):
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            compat_setenv('HOME', test_str)
+            os.environ['HOME'] = test_str
             self.assertEqual(compat_expanduser('~'), test_str)
         finally:
-            compat_setenv('HOME', old_home or '')
+            os.environ['HOME'] = old_home or ''
 
     def test_compat_urllib_parse_unquote(self):
         self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
@@ -72,8 +59,8 @@ def test_compat_urllib_parse_unquote(self):
             '''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
 
     def test_compat_urllib_parse_unquote_plus(self):
-        self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
-        self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
+        self.assertEqual(urllib.parse.unquote_plus('abc%20def'), 'abc def')
+        self.assertEqual(urllib.parse.unquote_plus('%7e/abc+def'), '~/abc def')
 
     def test_compat_urllib_parse_urlencode(self):
         self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
@@ -107,7 +94,7 @@ def test_compat_etree_fromstring_doctype(self):
         compat_etree_fromstring(xml)
 
     def test_struct_unpack(self):
-        self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
+        self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
 
 
 if __name__ == '__main__':

@@ -1,14 +1,18 @@
 #!/usr/bin/env python3
 # Allow direct execution
-import hashlib
-import json
 import os
-import socket
 import sys
 import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+
+import hashlib
+import json
+import socket
+import urllib.error
+import http.client
+
 from test.helper import (
     assertGreaterEqual,
     expect_info_dict,
@@ -19,13 +23,8 @@
     report_warning,
     try_rm,
 )
 
-import yt_dlp.YoutubeDL
-from yt_dlp.compat import (
-    compat_http_client,
-    compat_HTTPError,
-    compat_urllib_error,
-)
+import yt_dlp.YoutubeDL  # isort: split
+from yt_dlp.compat import compat_HTTPError
 from yt_dlp.extractor import get_info_extractor
 from yt_dlp.utils import (
     DownloadError,
@@ -167,7 +166,7 @@ def try_rm_tcs_files(tcs=None):
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
-                   if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
+                   if not err.exc_info[0] in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        raise
 
                    if try_num == RETRIES:

@@ -1,17 +1,18 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
-import re
 import sys
 import unittest
+import http.server
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+
+import re
 import threading
 from test.helper import http_server_port, try_rm
 
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_http_server
 from yt_dlp.downloader.http import HttpFD
 from yt_dlp.utils import encodeFilename
 
@@ -21,7 +22,7 @@
 TEST_SIZE = 10 * 1024
 
 
-class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
 
@@ -78,7 +79,7 @@ def error(self, msg):
 
 class TestHttpFD(unittest.TestCase):
     def setUp(self):
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)

@@ -3,20 +3,22 @@
 import os
 import sys
 import unittest
+import http.server
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
 import ssl
 import threading
+import urllib.request
 from test.helper import http_server_port
 
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_http_server, compat_urllib_request
 
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
-class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
 
@@ -53,7 +55,7 @@ def error(self, msg):
 
 class TestHTTP(unittest.TestCase):
     def setUp(self):
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)
@@ -64,7 +66,7 @@ def setUp(self):
 class TestHTTPS(unittest.TestCase):
     def setUp(self):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.load_cert_chain(certfn, None)
@@ -90,7 +92,7 @@ def setUp(self):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
         self.certdir = os.path.join(TEST_DIR, 'testdata', 'certificate')
         cacertfn = os.path.join(self.certdir, 'ca.crt')
-        self.httpd = compat_http_server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
+        self.httpd = http.server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.verify_mode = ssl.CERT_REQUIRED
         sslctx.load_verify_locations(cafile=cacertfn)
@@ -130,7 +132,7 @@ def test_certificate_nocombined_pass(self):
 
 
 def _build_proxy_handler(name):
-    class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+    class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
         proxy_name = name
 
         def log_message(self, format, *args):
@@ -146,14 +148,14 @@ def do_GET(self):
 
 class TestProxy(unittest.TestCase):
     def setUp(self):
-        self.proxy = compat_http_server.HTTPServer(
+        self.proxy = http.server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('normal'))
         self.port = http_server_port(self.proxy)
         self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
         self.proxy_thread.daemon = True
         self.proxy_thread.start()
 
-        self.geo_proxy = compat_http_server.HTTPServer(
+        self.geo_proxy = http.server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('geo'))
         self.geo_port = http_server_port(self.geo_proxy)
         self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
@@ -170,7 +172,7 @@ def test_proxy(self):
         response = ydl.urlopen(url).read().decode()
         self.assertEqual(response, f'normal: {url}')
 
-        req = compat_urllib_request.Request(url)
+        req = urllib.request.Request(url)
         req.add_header('Ytdl-request-proxy', geo_proxy)
         response = ydl.urlopen(req).read().decode()
         self.assertEqual(response, f'geo: {url}')

@@ -8,9 +8,10 @@
 
 import random
 import subprocess
-from test.helper import FakeYDL, get_params, is_download_test
+import urllib.request
 
-from yt_dlp.compat import compat_str, compat_urllib_request
+from test.helper import FakeYDL, get_params, is_download_test
+from yt_dlp.compat import compat_str
 
 
 @is_download_test
@@ -51,7 +52,7 @@ def test_secondary_proxy_http(self):
         if params is None:
             return
         ydl = FakeYDL()
-        req = compat_urllib_request.Request('http://yt-dl.org/ip')
+        req = urllib.request.Request('http://yt-dl.org/ip')
         req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),
@@ -62,7 +63,7 @@ def test_secondary_proxy_https(self):
         if params is None:
             return
         ydl = FakeYDL()
-        req = compat_urllib_request.Request('https://yt-dl.org/ip')
+        req = urllib.request.Request('https://yt-dl.org/ip')
         req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),

@@ -6,8 +6,8 @@
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import FakeYDL, is_download_test, md5
 
+from test.helper import FakeYDL, is_download_test, md5
 from yt_dlp.extractor import (
     NPOIE,
     NRKTVIE,

@@ -15,12 +15,9 @@
 import xml.etree.ElementTree
 
 from yt_dlp.compat import (
-    compat_chr,
     compat_etree_fromstring,
-    compat_getenv,
     compat_HTMLParseError,
     compat_os_name,
-    compat_setenv,
 )
 from yt_dlp.utils import (
     Config,
@@ -266,20 +263,20 @@ def test_expand_path(self):
         def env(var):
             return f'%{var}%' if sys.platform == 'win32' else f'${var}'
 
-        compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
+        os.environ['yt_dlp_EXPATH_PATH'] = 'expanded'
         self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
 
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            compat_setenv('HOME', test_str)
-            self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
-            self.assertEqual(expand_path('~'), compat_getenv('HOME'))
+            os.environ['HOME'] = test_str
+            self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
+            self.assertEqual(expand_path('~'), os.getenv('HOME'))
             self.assertEqual(
                 expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
-                '%s/expanded' % compat_getenv('HOME'))
+                '%s/expanded' % os.getenv('HOME'))
         finally:
-            compat_setenv('HOME', old_home or '')
+            os.environ['HOME'] = old_home or ''
 
     def test_prepend_extension(self):
         self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
@@ -1128,7 +1125,7 @@ def test_extract_attributes(self):
         self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
         # "Narrow" Python builds don't support unicode code points outside BMP.
         try:
-            compat_chr(0x10000)
+            chr(0x10000)
             supports_outside_bmp = True
         except ValueError:
             supports_outside_bmp = False

@@ -26,15 +26,8 @@
 from string import ascii_letters
 
 from .cache import Cache
-from .compat import (
-    HAS_LEGACY as compat_has_legacy,
-    compat_get_terminal_size,
-    compat_os_name,
-    compat_shlex_quote,
-    compat_str,
-    compat_urllib_error,
-    compat_urllib_request,
-)
+from .compat import HAS_LEGACY as compat_has_legacy
+from .compat import compat_os_name, compat_shlex_quote, compat_str
 from .cookies import load_cookies
 from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
 from .downloader.rtmp import rtmpdump_version
@@ -644,7 +637,7 @@ def check_deprecated(param, option, suggestion):
             try:
                 import pty
                 master, slave = pty.openpty()
-                width = compat_get_terminal_size().columns
+                width = shutil.get_terminal_size().columns
                 width_args = [] if width is None else ['-w', str(width)]
                 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                 try:
@@ -3724,7 +3717,7 @@ def _setup_opener(self):
             else:
                 proxies = {'http': opts_proxy, 'https': opts_proxy}
         else:
-            proxies = compat_urllib_request.getproxies()
+            proxies = urllib.request.getproxies()
             # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
             if 'http' in proxies and 'https' not in proxies:
                 proxies['https'] = proxies['http']
@@ -3740,13 +3733,13 @@ def _setup_opener(self):
         # default FileHandler and allows us to disable the file protocol, which
         # can be used for malicious purposes (see
         # https://github.com/ytdl-org/youtube-dl/issues/8227)
-        file_handler = compat_urllib_request.FileHandler()
+        file_handler = urllib.request.FileHandler()
 
         def file_open(*args, **kwargs):
-            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
+            raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
         file_handler.file_open = file_open
 
-        opener = compat_urllib_request.build_opener(
+        opener = urllib.request.build_opener(
             proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
 
         # Delete the default user-agent header, which would otherwise apply in

@@ -3,13 +3,14 @@
 
 __license__ = 'Public Domain'
 
+import getpass
 import itertools
 import optparse
 import os
 import re
 import sys
 
-from .compat import compat_getpass, compat_shlex_quote
+from .compat import compat_shlex_quote
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader import FileDownloader
 from .downloader.external import get_external_downloader
@@ -531,9 +532,9 @@ def report_deprecation(val, old, new=None):
 
     # Ask for passwords
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass('Type account password and press [Return]: ')
+        opts.password = getpass.getpass('Type account password and press [Return]: ')
     if opts.ap_username is not None and opts.ap_password is None:
-        opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
+        opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')
 
     return warnings, deprecation_warnings
 

@@ -6,7 +6,6 @@
 import shutil
 import traceback
 
-from .compat import compat_getenv
 from .utils import expand_path, write_json_file
 
 
@@ -17,7 +16,7 @@ def __init__(self, ydl):
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
+            cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'yt-dlp')
         return expand_path(res)
 

@@ -1,52 +1,16 @@
 """Deprecated - New code should avoid these"""
 
 import base64
-import getpass
-import html
-import html.parser
-import http
-import http.client
-import http.cookiejar
-import http.cookies
-import http.server
-import itertools
-import os
-import shutil
-import struct
-import tokenize
-import urllib
+import urllib.error
+import urllib.parse
+
+compat_str = str
 
 compat_b64decode = base64.b64decode
-compat_chr = chr
-compat_cookiejar = http.cookiejar
-compat_cookiejar_Cookie = http.cookiejar.Cookie
-compat_cookies_SimpleCookie = http.cookies.SimpleCookie
-compat_get_terminal_size = shutil.get_terminal_size
-compat_getenv = os.getenv
-compat_getpass = getpass.getpass
-compat_html_entities = html.entities
-compat_html_entities_html5 = html.entities.html5
-compat_HTMLParser = html.parser.HTMLParser
-compat_http_client = http.client
-compat_http_server = http.server
 compat_HTTPError = urllib.error.HTTPError
-compat_itertools_count = itertools.count
+compat_urlparse = urllib.parse
 compat_parse_qs = urllib.parse.parse_qs
-compat_str = str
-compat_struct_pack = struct.pack
-compat_struct_unpack = struct.unpack
-compat_tokenize_tokenize = tokenize.tokenize
-compat_urllib_error = urllib.error
 compat_urllib_parse_unquote = urllib.parse.unquote
-compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
 compat_urllib_parse_urlencode = urllib.parse.urlencode
 compat_urllib_parse_urlparse = urllib.parse.urlparse
-compat_urllib_request = urllib.request
-compat_urlparse = compat_urllib_parse = urllib.parse
-
-
-def compat_setenv(key, value, env=os.environ):
-    env[key] = value
-
-
-__all__ = [x for x in globals() if x.startswith('compat_')]

@@ -2,15 +2,23 @@
 
 import collections
 import ctypes
-import http
+import getpass
+import html.entities
+import html.parser
 import http.client
 import http.cookiejar
 import http.cookies
 import http.server
+import itertools
+import os
 import shlex
+import shutil
 import socket
 import struct
-import urllib
+import tokenize
+import urllib.error
+import urllib.parse
+import urllib.request
 import xml.etree.ElementTree as etree
 from subprocess import DEVNULL
 
@@ -32,12 +40,17 @@ def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
     return ctypes.WINFUNCTYPE(*args, **kwargs)
 
 
+def compat_setenv(key, value, env=os.environ):
+    env[key] = value
+
+
 compat_basestring = str
 compat_collections_abc = collections.abc
 compat_cookies = http.cookies
 compat_etree_Element = etree.Element
 compat_etree_register_namespace = etree.register_namespace
 compat_filter = filter
+compat_getenv = os.getenv
 compat_input = input
 compat_integer_types = (int, )
 compat_kwargs = lambda kwargs: kwargs
@@ -53,9 +66,28 @@ def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
 compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
 compat_urllib_parse_urlunparse = urllib.parse.urlunparse
 compat_urllib_request_DataHandler = urllib.request.DataHandler
+compat_urllib_request = urllib.request
 compat_urllib_response = urllib.response
 compat_urlretrieve = urllib.request.urlretrieve
 compat_xml_parse_error = etree.ParseError
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
+compat_getpass = getpass.getpass
+compat_chr = chr
+compat_urllib_parse = urllib.parse
+compat_itertools_count = itertools.count
+compat_cookiejar = http.cookiejar
+compat_cookiejar_Cookie = http.cookiejar.Cookie
+compat_cookies_SimpleCookie = http.cookies.SimpleCookie
+compat_get_terminal_size = shutil.get_terminal_size
+compat_html_entities = html.entities
+compat_html_entities_html5 = html.entities.html5
+compat_tokenize_tokenize = tokenize.tokenize
+compat_HTMLParser = html.parser.HTMLParser
+compat_http_client = http.client
+compat_http_server = http.server
+compat_struct_pack = struct.pack
+compat_struct_unpack = struct.unpack
+compat_urllib_error = urllib.error
+compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus

@@ -11,13 +11,14 @@
 from datetime import datetime, timedelta, timezone
 from enum import Enum, auto
 from hashlib import pbkdf2_hmac
+import http.cookiejar
 
 from .aes import (
     aes_cbc_decrypt_bytes,
     aes_gcm_decrypt_and_verify_bytes,
     unpad_pkcs7,
 )
-from .compat import compat_b64decode, compat_cookiejar_Cookie
+from .compat import compat_b64decode
 from .dependencies import (
     _SECRETSTORAGE_UNAVAILABLE_REASON,
     secretstorage,
@@ -142,7 +143,7 @@ def _extract_firefox_cookies(profile, logger):
         total_cookie_count = len(table)
         for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
             progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
-            cookie = compat_cookiejar_Cookie(
+            cookie = http.cookiejar.Cookie(
                 version=0, name=name, value=value, port=None, port_specified=False,
                 domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
                 path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
@@ -297,7 +298,7 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
     if value is None:
         return is_encrypted, None
 
-    return is_encrypted, compat_cookiejar_Cookie(
+    return is_encrypted, http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
@@ -589,7 +590,7 @@ def _parse_safari_cookies_record(data, jar, logger):
 
     p.skip_to(record_size, 'space at the end of the record')
 
-    cookie = compat_cookiejar_Cookie(
+    cookie = http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,

@@ -7,7 +7,6 @@
 
 from .fragment import FragmentFD
 from ..compat import functools  # isort: split
-from ..compat import compat_setenv
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
@@ -403,8 +402,8 @@ def _call_downloader(self, tmpfilename, info_dict):
             # We could switch to the following code if we are able to detect version properly
             # args += ['-http_proxy', proxy]
             env = os.environ.copy()
-            compat_setenv('HTTP_PROXY', proxy, env=env)
-            compat_setenv('http_proxy', proxy, env=env)
+            env['HTTP_PROXY'] = proxy
+            env['http_proxy'] = proxy
 
         protocol = info_dict.get('protocol')
 

@@ -1,14 +1,13 @@
 import io
 import itertools
+import struct
 import time
+import urllib.error
 
 from .fragment import FragmentFD
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
-    compat_struct_pack,
-    compat_struct_unpack,
-    compat_urllib_error,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
@@ -35,13 +34,13 @@ def read_bytes(self, n):
 
     # Utility functions for reading numbers and strings
     def read_unsigned_long_long(self):
-        return compat_struct_unpack('!Q', self.read_bytes(8))[0]
+        return struct.unpack('!Q', self.read_bytes(8))[0]
 
     def read_unsigned_int(self):
-        return compat_struct_unpack('!I', self.read_bytes(4))[0]
+        return struct.unpack('!I', self.read_bytes(4))[0]
 
     def read_unsigned_char(self):
-        return compat_struct_unpack('!B', self.read_bytes(1))[0]
+        return struct.unpack('!B', self.read_bytes(1))[0]
 
     def read_string(self):
         res = b''
@@ -203,11 +202,11 @@ def build_fragments_list(boot_info):
 
 
 def write_unsigned_int(stream, val):
-    stream.write(compat_struct_pack('!I', val))
+    stream.write(struct.pack('!I', val))
 
 
 def write_unsigned_int_24(stream, val):
-    stream.write(compat_struct_pack('!I', val)[1:])
+    stream.write(struct.pack('!I', val)[1:])
 
 
 def write_flv_header(stream):
@@ -411,7 +410,7 @@ def real_download(self, filename, info_dict):
                     if box_type == b'mdat':
                         self._append_fragment(ctx, box_data)
                         break
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 if live and (err.code == 404 or err.code == 410):
                     # We didn't keep up with the live window. Continue
                     # with the next available fragment.

@@ -4,12 +4,14 @@
 import json
 import math
 import os
+import struct
 import time
+import urllib.error
 
 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_os_name, compat_struct_pack, compat_urllib_error
+from ..compat import compat_os_name
 from ..utils import (
     DownloadError,
     encodeFilename,
@@ -348,7 +350,7 @@ def decrypt_fragment(fragment, frag_content):
         decrypt_info = fragment.get('decrypt_info')
         if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
             return frag_content
-        iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
+        iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
         decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
         # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
         # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
@@ -457,7 +459,7 @@ def download_fragment(fragment, ctx):
                    if self._download_fragment(ctx, fragment['url'], info_dict, headers):
                        break
                    return
-                except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
+                except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
                    # Unavailable (possibly temporary) fragments may be served.
                    # First we try to retry then either skip or abort.
                    # See https://github.com/ytdl-org/youtube-dl/issues/10165,

@@ -3,9 +3,10 @@
 import socket
 import ssl
 import time
+import urllib.error
+import http.client
 
 from .common import FileDownloader
-from ..compat import compat_http_client, compat_urllib_error
 from ..utils import (
     ContentTooShortError,
     ThrottledDownload,
@@ -24,7 +25,7 @@
     socket.timeout,  # compat: py < 3.10
     ConnectionError,
     ssl.SSLError,
-    compat_http_client.HTTPException
+    http.client.HTTPException
 )
 
 
@@ -155,7 +156,7 @@ def establish_connection():
                     ctx.resume_len = 0
                     ctx.open_mode = 'wb'
                     ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 if err.code == 416:
                     # Unable to resume (requested range not satisfiable)
                     try:
@@ -163,7 +164,7 @@ def establish_connection():
                     ctx.data = self.ydl.urlopen(
                         sanitized_Request(url, request_data, headers))
                     content_length = ctx.data.info()['Content-Length']
-                except compat_urllib_error.HTTPError as err:
+                except urllib.error.HTTPError as err:
                     if err.code < 500 or err.code >= 600:
                         raise
                 else:
@@ -196,7 +197,7 @@ def establish_connection():
                     # Unexpected HTTP error
                     raise
                 raise RetryDownload(err)
-            except compat_urllib_error.URLError as err:
+            except urllib.error.URLError as err:
                 if isinstance(err.reason, ssl.CertificateError):
                     raise
                 raise RetryDownload(err)

@@ -2,9 +2,9 @@
 import io
 import struct
 import time
+import urllib.error
 
 from .fragment import FragmentFD
-from ..compat import compat_urllib_error
 
 u8 = struct.Struct('>B')
 u88 = struct.Struct('>Bx')
@@ -268,7 +268,7 @@ def real_download(self, filename, info_dict):
                         extra_state['ism_track_written'] = True
                     self._append_fragment(ctx, frag_content)
                     break
-                except compat_urllib_error.HTTPError as err:
+                except urllib.error.HTTPError as err:
                     count += 1
                     if count <= fragment_retries:
                         self.report_retry_fragment(err, frag_index, count, fragment_retries)

@@ -1,8 +1,8 @@
 import json
 import time
+import urllib.error
 
 from .fragment import FragmentFD
-from ..compat import compat_urllib_error
 from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get
 
 
@@ -128,7 +128,7 @@ def download_and_parse_fragment(url, frag_index, request_data=None, headers=None
                 elif info_dict['protocol'] == 'youtube_live_chat':
                     continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
                 return True, continuation_id, offset, click_tracking_params
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 count += 1
                 if count <= fragment_retries:
                     self.report_retry_fragment(err, frag_index, count, fragment_retries)

@@ -7,12 +7,13 @@
 import re
 import struct
 import time
+import urllib.request
 import urllib.response
 import uuid
 
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
-from ..compat import compat_urllib_parse_urlparse, compat_urllib_request
+from ..compat import compat_urllib_parse_urlparse
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
@@ -33,7 +34,7 @@ def add_opener(ydl, handler):
     ''' Add a handler for opening URLs, like _download_webpage '''
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     ydl._opener.add_handler(handler)
 
 
@@ -46,7 +47,7 @@ def remove_opener(ydl, handler):
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
    opener = ydl._opener
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     if isinstance(handler, (type, tuple)):
         find_cp = lambda x: isinstance(x, handler)
     else:
@@ -96,7 +97,7 @@ def remove_opener(ydl, handler):
     opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
 
 
-class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
+class AbemaLicenseHandler(urllib.request.BaseHandler):
     handler_order = 499
     STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
     HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'

@@ -1,3 +1,4 @@
+import getpass
 import json
 import re
 import time
@@ -5,19 +6,15 @@
 import xml.etree.ElementTree as etree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urlparse,
-    compat_getpass
-)
+from ..compat import compat_urlparse
 from ..utils import (
-    unescapeHTML,
-    urlencode_postdata,
-    unified_timestamp,
-    ExtractorError,
     NO_DEFAULT,
+    ExtractorError,
+    unescapeHTML,
+    unified_timestamp,
+    urlencode_postdata,
 )
 
 
 MSO_INFO = {
     'DTV': {
         'name': 'DIRECTV',
@@ -1506,7 +1503,7 @@ def extract_redirect_url(html, url=None, fatal=False):
                 'send_confirm_link': False,
                 'send_token': True
             }))
-            philo_code = compat_getpass('Type auth code you have received [Return]: ')
+            philo_code = getpass.getpass('Type auth code you have received [Return]: ')
             self._download_webpage(
                 'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
                     'token': philo_code
@@ -1,36 +1,34 @@
-import re
 import json
+import re
+import urllib.parse
 
 from .common import InfoExtractor
-from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-    compat_HTTPError
-)
+from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
+from ..compat import compat_HTTPError, compat_urllib_parse_unquote
 from ..utils import (
+    KNOWN_EXTENSIONS,
+    ExtractorError,
+    HEADRequest,
     bug_reports_message,
     clean_html,
     dict_get,
     extract_attributes,
-    ExtractorError,
     get_element_by_id,
-    HEADRequest,
     int_or_none,
     join_nonempty,
-    KNOWN_EXTENSIONS,
     merge_dicts,
     mimetype2ext,
     orderedSet,
     parse_duration,
     parse_qs,
-    str_to_int,
     str_or_none,
+    str_to_int,
     traverse_obj,
     try_get,
     unified_strdate,
     unified_timestamp,
+    url_or_none,
     urlhandle_detect_ext,
-    url_or_none
 )
 
 
@@ -143,7 +141,7 @@ def _playlist_data(webpage):
         return json.loads(extract_attributes(element)['value'])
 
     def _real_extract(self, url):
-        video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
+        video_id = urllib.parse.unquote_plus(self._match_id(url))
         identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
 
         # Archive.org metadata API doesn't clearly demarcate playlist entries
@@ -1,16 +1,12 @@
-import xml.etree.ElementTree
 import functools
 import itertools
 import json
 import re
+import urllib.error
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_error,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -391,7 +387,7 @@ def _process_media_selector(self, media_selection, programme_id):
                            href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                            m3u8_id=format_id, fatal=False)
                    except ExtractorError as e:
-                        if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
+                        if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
                                and e.exc_info[1].code in (403, 404)):
                            raise
                        fmts = []
@@ -1,13 +1,9 @@
 import codecs
-import re
 import json
+import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_chr,
-    compat_ord,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_ord, compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -16,8 +12,8 @@
     multipart_encode,
     parse_duration,
     random_birthday,
-    urljoin,
     try_get,
+    urljoin,
 )
 
 
@@ -144,7 +140,7 @@ def decrypt_file(a):
             b = []
             for c in a:
                 f = compat_ord(c)
-                b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f <= 126 else compat_chr(f))
+                b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f))
             a = ''.join(b)
             a = a.replace('.cda.mp4', '')
             for p in ('.2cda.pl', '.3cda.pl'):
@@ -1,11 +1,11 @@
 import itertools
 import json
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     str_to_int,
     url_or_none,
@@ -47,8 +47,8 @@ def _get_post(self, id, post_data):
             'id': id,
             'extractor_key': ChingariIE.ie_key(),
             'extractor': 'Chingari',
-            'title': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
-            'description': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
+            'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
+            'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
             'duration': media_data.get('duration'),
             'thumbnail': url_or_none(thumbnail),
             'like_count': post_data.get('likeCount'),
@@ -1,5 +1,6 @@
 import base64
 import collections
+import getpass
 import hashlib
 import itertools
 import json
@@ -9,22 +10,20 @@
 import random
 import sys
 import time
+import urllib.request
 import xml.etree.ElementTree
+import http.client
+import http.cookiejar
+import http.cookies
 
 from ..compat import functools, re  # isort: split
 from ..compat import (
-    compat_cookiejar_Cookie,
-    compat_cookies_SimpleCookie,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_getpass,
-    compat_http_client,
     compat_os_name,
     compat_str,
-    compat_urllib_error,
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..downloader import FileDownloader
@@ -671,7 +670,7 @@ def extract(self, url):
                if hasattr(e, 'countries'):
                    kwargs['countries'] = e.countries
                raise type(e)(e.orig_msg, **kwargs)
-            except compat_http_client.IncompleteRead as e:
+            except http.client.IncompleteRead as e:
                raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
            except (KeyError, StopIteration) as e:
                raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
@@ -730,7 +729,7 @@ def IE_NAME(cls):
 
    @staticmethod
    def __can_accept_status_code(err, expected_status):
-        assert isinstance(err, compat_urllib_error.HTTPError)
+        assert isinstance(err, urllib.error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
@@ -739,7 +738,7 @@ def __can_accept_status_code(err, expected_status):
            return err.code in variadic(expected_status)
 
    def _create_request(self, url_or_request, data=None, headers={}, query={}):
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
            return update_Request(url_or_request, data=data, headers=headers, query=query)
        if query:
            url_or_request = update_url_query(url_or_request, query)
@@ -779,7 +778,7 @@ def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fa
        try:
            return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
        except network_exceptions as err:
-            if isinstance(err, compat_urllib_error.HTTPError):
+            if isinstance(err, urllib.error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
@@ -807,7 +806,7 @@ def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=
 
        Arguments:
        url_or_request -- plain text URL as a string or
-            a compat_urllib_request.Request object
+            a urllib.request.Request object
        video_id -- Video/playlist/item identifier (string)
 
        Keyword arguments:
@@ -1056,7 +1055,7 @@ def _download_webpage(
        while True:
            try:
                return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
-            except compat_http_client.IncompleteRead as e:
+            except http.client.IncompleteRead as e:
                try_count += 1
                if try_count >= tries:
                    raise e
@@ -1292,7 +1291,7 @@ def _get_tfa_info(self, note='two-factor verification code'):
        if tfa is not None:
            return tfa
 
-        return compat_getpass('Type %s and press [Return]: ' % note)
+        return getpass.getpass('Type %s and press [Return]: ' % note)
 
    # Helper functions for extracting OpenGraph info
    @staticmethod
@@ -3597,15 +3596,15 @@ def _float(self, v, name, fatal=False, **kwargs):
 
    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
-        cookie = compat_cookiejar_Cookie(
+        cookie = http.cookiejar.Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self.cookiejar.set_cookie(cookie)
 
    def _get_cookies(self, url):
-        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
-        return compat_cookies_SimpleCookie(self._downloader._calc_cookies(url))
+        """ Return a http.cookies.SimpleCookie with the cookies for the url """
+        return http.cookies.SimpleCookie(self._downloader._calc_cookies(url))
 
    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
@@ -1,19 +1,20 @@
 import base64
-import re
 import json
-import zlib
+import re
+import urllib.request
 import xml.etree.ElementTree
+import zlib
 from hashlib import sha1
-from math import pow, sqrt, floor
+from math import floor, pow, sqrt
 
 from .common import InfoExtractor
 from .vrv import VRVBaseIE
+from ..aes import aes_cbc_decrypt
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -22,8 +23,8 @@
     extract_attributes,
     float_or_none,
     format_field,
-    intlist_to_bytes,
     int_or_none,
+    intlist_to_bytes,
     join_nonempty,
     lowercase_escape,
     merge_dicts,
@@ -34,9 +35,6 @@
     try_get,
     xpath_text,
 )
-from ..aes import (
-    aes_cbc_decrypt,
-)
 
 
 class CrunchyrollBaseIE(InfoExtractor):
@@ -259,7 +257,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
     }
 
     def _download_webpage(self, url_or_request, *args, **kwargs):
-        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+        request = (url_or_request if isinstance(url_or_request, urllib.request.Request)
                    else sanitized_Request(url_or_request))
         # Accept-Language must be set explicitly to accept any language to avoid issues
         # similar to https://github.com/ytdl-org/youtube-dl/issues/6797.
@@ -1,7 +1,7 @@
 import base64
 import json
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from .adobepass import AdobePassIE
@@ -1,18 +1,18 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
 )
 from ..utils import (
+    ExtractorError,
     clean_html,
     determine_ext,
     error_to_compat_str,
-    ExtractorError,
     float_or_none,
     get_element_by_id,
     get_first,
@@ -467,7 +467,7 @@ def extract_dash_manifest(video, formats):
             dash_manifest = video.get('dash_manifest')
             if dash_manifest:
                 formats.extend(self._parse_mpd_formats(
-                    compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
+                    compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
 
         def process_formats(formats):
             # Downloads with browser's User-Agent are rate limited. Working around
@@ -1,6 +1,6 @@
 import itertools
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from ..utils import (
@@ -1,10 +1,10 @@
 import random
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    int_or_none,
     float_or_none,
+    int_or_none,
     timeconvert,
     update_url_query,
     xpath_text,
@@ -66,7 +66,7 @@ def _real_extract(self, url):
         formats = []
         for quality in quality_options:
             formats.append({
-                'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
+                'url': urllib.parse.unquote_plus(quality.attrib['url']),
                 'height': int_or_none(quality.attrib.get('height')),
                 'width': int_or_none(quality.attrib.get('width')),
                 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),
@@ -1,17 +1,14 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_parse_qs, compat_urllib_parse_unquote
 from ..utils import (
-    determine_ext,
     ExtractorError,
-    int_or_none,
+    determine_ext,
     get_element_by_attribute,
+    int_or_none,
     mimetype2ext,
 )
 
@@ -143,7 +140,7 @@ def _real_extract(self, url):
 
         headers = {
             # Disable family filter
-            'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False}))
+            'Cookie': 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
         }
 
         # AnyClip videos require the flashversion cookie so that we get the link
@@ -3,7 +3,6 @@
 from .common import InfoExtractor
 from ..compat import (
     compat_b64decode,
-    compat_chr,
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
@@ -72,7 +71,7 @@ class MixcloudIE(MixcloudBaseIE):
     def _decrypt_xor_cipher(key, ciphertext):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
-            compat_chr(compat_ord(ch) ^ compat_ord(k))
+            chr(compat_ord(ch) ^ compat_ord(k))
             for ch, k in zip(ciphertext, itertools.cycle(key))])
 
     def _real_extract(self, url):
@@ -1,13 +1,7 @@
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote_plus
-)
-from ..utils import (
-    parse_duration,
-    remove_end,
-    unified_strdate,
-    urljoin
-)
+from ..utils import parse_duration, remove_end, unified_strdate, urljoin
 
 
 class NDTVIE(InfoExtractor):
@@ -80,7 +74,7 @@ def _real_extract(self, url):
         webpage = self._download_webpage(url, video_id)
 
         # '__title' does not contain extra words such as sub-site name, "Video" etc.
-        title = compat_urllib_parse_unquote_plus(
+        title = urllib.parse.unquote_plus(
             self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
             or self._og_search_title(webpage))
 
@@ -1,14 +1,11 @@
 import itertools
 import json
 import time
-import urllib
+import urllib.parse
+import urllib.error
 
-from ..utils import (
-    ExtractorError,
-    parse_iso8601,
-    try_get,
-)
 from .common import InfoExtractor
+from ..utils import ExtractorError, parse_iso8601, try_get
 
 
 class NebulaBaseIE(InfoExtractor):
@@ -1,18 +1,12 @@
-from hashlib import md5
+import itertools
+import re
 from base64 import b64encode
 from datetime import datetime
-import re
+from hashlib import md5
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_urlencode,
-    compat_str,
-    compat_itertools_count,
-)
-from ..utils import (
-    sanitized_Request,
-    float_or_none,
-)
+from ..compat import compat_str, compat_urllib_parse_urlencode
+from ..utils import float_or_none, sanitized_Request
 
 
 class NetEaseMusicBaseIE(InfoExtractor):
@@ -449,7 +443,7 @@ def _real_extract(self, url):
         name = None
         desc = None
         entries = []
-        for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE):
+        for offset in itertools.count(start=0, step=self._PAGE_SIZE):
             info = self.query_api(
                 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d'
                 % (self._PAGE_SIZE, dj_id, offset),
@@ -1,11 +1,9 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_urllib_parse,
-)
+from ..compat import compat_HTTPError
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -125,7 +123,7 @@ def _real_extract(self, url):
 
         is_live = False
         if ride_data.get('content_format') == 'audio':
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), compat_urllib_parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), urllib.parse.quote(token))
             formats = [{
                 'url': url,
                 'ext': 'm4a',
@@ -138,9 +136,9 @@ def _real_extract(self, url):
             url = 'https://members.onepeloton.com/.netlify/functions/m3u8-proxy?displayLanguage=en&acceptedSubtitles=%s&url=%s?hdnea=%s' % (
                 ','.join([re.sub('^([a-z]+)-([A-Z]+)$', r'\1', caption) for caption in ride_data['captions']]),
                 ride_data['vod_stream_url'],
-                compat_urllib_parse.quote(compat_urllib_parse.quote(token)))
+                urllib.parse.quote(urllib.parse.quote(token)))
         elif ride_data.get('live_stream_url'):
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), compat_urllib_parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), urllib.parse.quote(token))
             is_live = True
         else:
             raise ExtractorError('Missing video URL')
@@ -1,14 +1,9 @@
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-)
-from ..utils import (
-    clean_html,
-    ExtractorError,
-)
+from ..compat import compat_urllib_parse_unquote
+from ..utils import ExtractorError, clean_html
 
 
 class PlayvidIE(InfoExtractor):
@@ -62,7 +57,7 @@ def _real_extract(self, url):
             val = videovars_match.group(2)
 
             if key == 'title':
-                video_title = compat_urllib_parse_unquote_plus(val)
+                video_title = urllib.parse.unquote_plus(val)
             if key == 'duration':
                 try:
                     duration = int(val)
@@ -1,8 +1,5 @@
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_chr,
-)
+from ..compat import compat_b64decode
 from ..utils import int_or_none
 
 
@@ -50,7 +47,7 @@ def _real_extract(self, url):
                c_ord += 13
                if upper < c_ord:
                    c_ord -= 26
-            loc_b64 += compat_chr(c_ord)
+            loc_b64 += chr(c_ord)
 
        video_url = compat_b64decode(loc_b64).decode('utf-8')
 
@@ -3,29 +3,26 @@
 import math
 import operator
 import re
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_request,
-)
 from .openload import PhantomJSwrapper
+from ..compat import compat_HTTPError, compat_str
 from ..utils import (
+    NO_DEFAULT,
+    ExtractorError,
     clean_html,
     determine_ext,
-    ExtractorError,
     format_field,
     int_or_none,
     merge_dicts,
-    NO_DEFAULT,
     orderedSet,
     remove_quotes,
     remove_start,
     str_to_int,
     update_url_query,
-    urlencode_postdata,
     url_or_none,
+    urlencode_postdata,
 )
 
 
@@ -50,7 +47,7 @@ def dl(*args, **kwargs):
                r'document\.location\.reload\(true\)')):
            url_or_request = args[0]
            url = (url_or_request.get_full_url()
-                   if isinstance(url_or_request, compat_urllib_request.Request)
+                   if isinstance(url_or_request, urllib.request.Request)
                   else url_or_request)
            phantom = PhantomJSwrapper(self, required_version='2.0')
            phantom.get(url, html=webpage)
@@ -1,14 +1,12 @@
 import base64
 import io
+import struct
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_struct_unpack,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
     ExtractorError,
+    determine_ext,
     float_or_none,
     qualities,
     remove_end,
@@ -73,7 +71,7 @@ def _real_initialize(self):
     def _decrypt_url(png):
         encrypted_data = io.BytesIO(compat_b64decode(png)[8:])
         while True:
-            length = compat_struct_unpack('!I', encrypted_data.read(4))[0]
+            length = struct.unpack('!I', encrypted_data.read(4))[0]
             chunk_type = encrypted_data.read(4)
             if chunk_type == b'IEND':
                 break
@@ -1,11 +1,8 @@
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_request,
-)
-from ..utils import (
-    ExtractorError,
-)
+from ..compat import compat_parse_qs
+from ..utils import ExtractorError
 
 
 class ScreencastIE(InfoExtractor):
@@ -75,7 +72,7 @@ def _real_extract(self, url):
             flash_vars_s = flash_vars_s.replace(',', '&')
             if flash_vars_s:
                 flash_vars = compat_parse_qs(flash_vars_s)
-                video_url_raw = compat_urllib_request.quote(
+                video_url_raw = urllib.request.quote(
                     flash_vars['content'][0])
                 video_url = video_url_raw.replace('http%3A', 'http:')
 
@@ -1,14 +1,15 @@
 
 
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_urllib_parse_unquote_plus,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
+    KNOWN_EXTENSIONS,
     ExtractorError,
+    determine_ext,
     int_or_none,
     js_to_json,
-    KNOWN_EXTENSIONS,
     parse_filesize,
     rot47,
     url_or_none,
@@ -130,7 +131,7 @@ def decode_url_old(encoded_url):
             return stream_url
 
         def decode_url(encoded_url):
-            return rot47(compat_urllib_parse_unquote_plus(encoded_url))
+            return rot47(urllib.parse.unquote_plus(encoded_url))
 
         return decode_url(self._parse_json(
             self._search_regex(
@@ -1,16 +1,12 @@
 import re
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
+    ExtractorError,
     determine_ext,
     extract_attributes,
-    ExtractorError,
     float_or_none,
     int_or_none,
     js_to_json,
@@ -155,7 +151,7 @@ def _download_json(self, url_or_request, *args, **kwargs):
             headers['X-Udemy-Bearer-Token'] = cookie.value
             headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
 
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
             for header, value in headers.items():
                 url_or_request.add_header(header, value)
         else:
@@ -1,10 +1,9 @@
 
 
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
-from ..utils import (
-    unified_strdate,
-)
+from ..utils import unified_strdate
 
 
 class UrortIE(InfoExtractor):
@@ -31,7 +30,7 @@ class UrortIE(InfoExtractor):
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
 
-        fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
+        fstr = urllib.parse.quote("InternalBandUrl eq '%s'" % playlist_id)
         json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
         songs = self._download_json(json_url, playlist_id)
         entries = []
|
@ -1,8 +1,10 @@
|
|||||||
import random
|
import random
|
||||||
import re
|
import re
|
||||||
import string
|
import string
|
||||||
|
import struct
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
|
from ..compat import compat_b64decode, compat_ord
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
@ -14,11 +16,6 @@
|
|||||||
xpath_element,
|
xpath_element,
|
||||||
xpath_text,
|
xpath_text,
|
||||||
)
|
)
|
||||||
from ..compat import (
|
|
||||||
compat_b64decode,
|
|
||||||
compat_ord,
|
|
||||||
compat_struct_pack,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class VideaIE(InfoExtractor):
|
class VideaIE(InfoExtractor):
|
||||||
@ -102,7 +99,7 @@ def rc4(cipher_text, key):
|
|||||||
j = (j + S[i]) % 256
|
j = (j + S[i]) % 256
|
||||||
S[i], S[j] = S[j], S[i]
|
S[i], S[j] = S[j], S[i]
|
||||||
k = S[(S[i] + S[j]) % 256]
|
k = S[(S[i] + S[j]) % 256]
|
||||||
res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))
|
res += struct.pack('B', k ^ compat_ord(cipher_text[m]))
|
||||||
|
|
||||||
return res.decode()
|
return res.decode()
|
||||||
|
|
||||||
|
@@ -1,17 +1,14 @@
 import base64
-import json
 import hashlib
 import hmac
+import json
 import random
 import string
 import time
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_urllib_parse_urlencode,
-    compat_urllib_parse,
-)
+from ..compat import compat_HTTPError, compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -46,12 +43,12 @@ def _call_api(self, path, video_id, note, data=None):
             headers['Content-Type'] = 'application/json'
         base_string = '&'.join([
             'POST' if data else 'GET',
-            compat_urllib_parse.quote(base_url, ''),
-            compat_urllib_parse.quote(encoded_query, '')])
+            urllib.parse.quote(base_url, ''),
+            urllib.parse.quote(encoded_query, '')])
         oauth_signature = base64.b64encode(hmac.new(
             (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
             base_string.encode(), hashlib.sha1).digest()).decode()
-        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
+        encoded_query += '&oauth_signature=' + urllib.parse.quote(oauth_signature, '')
         try:
             return self._download_json(
                 '?'.join([base_url, encoded_query]), video_id,
@@ -1,11 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
-from ..utils import (
-    decode_packed_codes,
-    ExtractorError,
-)
+from ..utils import ExtractorError, decode_packed_codes
 
 
 class VShareIE(InfoExtractor):
@@ -37,7 +33,7 @@ def _extract_packed(self, webpage):
         digits = [int(digit) for digit in digits.split(',')]
         key_digit = self._search_regex(
             r'fromCharCode\(.+?(\d+)\)}', unpacked, 'key digit')
-        chars = [compat_chr(d - int(key_digit)) for d in digits]
+        chars = [chr(d - int(key_digit)) for d in digits]
         return ''.join(chars)
 
     def _real_extract(self, url):
@@ -1,11 +1,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
 from ..utils import (
+    ExtractorError,
     decode_packed_codes,
     determine_ext,
-    ExtractorError,
     int_or_none,
     js_to_json,
     urlencode_postdata,
@@ -32,11 +31,11 @@ def aa_decode(aa_code):
         aa_char = aa_char.replace('+ ', '')
         m = re.match(r'^\d+', aa_char)
         if m:
-            ret += compat_chr(int(m.group(0), 8))
+            ret += chr(int(m.group(0), 8))
         else:
             m = re.match(r'^u([\da-f]+)', aa_char)
             if m:
-                ret += compat_chr(int(m.group(1), 16))
+                ret += chr(int(m.group(1), 16))
     return ret
 
 
@@ -1,15 +1,15 @@
 import hashlib
 import itertools
 import re
+import urllib.parse
 
+from .brightcove import BrightcoveNewIE
 from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse,
-)
+from .youtube import YoutubeIE
+from ..compat import compat_str
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     mimetype2ext,
     parse_iso8601,
@@ -18,9 +18,6 @@
     url_or_none,
 )
 
-from .brightcove import BrightcoveNewIE
-from .youtube import YoutubeIE
-
 
 class YahooIE(InfoExtractor):
     IE_DESC = 'Yahoo screen and movies'
@@ -333,7 +330,7 @@ class YahooSearchIE(SearchInfoExtractor):
 
     def _search_results(self, query):
         for pagenum in itertools.count(0):
-            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (urllib.parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query,
                 note='Downloading results page ' + str(pagenum + 1))
             yield from (self.url_result(result['rurl']) for result in info['results'])
@@ -1,8 +1,8 @@
-import re
 import json
+import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 
 
 class YnetIE(InfoExtractor):
@@ -31,7 +31,7 @@ def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        content = compat_urllib_parse_unquote_plus(self._og_search_video_url(webpage))
+        content = urllib.parse.unquote_plus(self._og_search_video_url(webpage))
         config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
         f4m_url = config['clip']['url']
         title = self._og_search_title(webpage)
|
@ -13,15 +13,14 @@
|
|||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor, SearchInfoExtractor
|
from .common import InfoExtractor, SearchInfoExtractor
|
||||||
from ..compat import functools # isort: split
|
from ..compat import functools # isort: split
|
||||||
from ..compat import (
|
from ..compat import (
|
||||||
compat_chr,
|
|
||||||
compat_HTTPError,
|
compat_HTTPError,
|
||||||
compat_parse_qs,
|
compat_parse_qs,
|
||||||
compat_str,
|
compat_str,
|
||||||
compat_urllib_parse_unquote_plus,
|
|
||||||
compat_urllib_parse_urlencode,
|
compat_urllib_parse_urlencode,
|
||||||
compat_urllib_parse_urlparse,
|
compat_urllib_parse_urlparse,
|
||||||
compat_urlparse,
|
compat_urlparse,
|
||||||
@ -2483,7 +2482,7 @@ def _extract_signature_function(self, video_id, player_url, example_sig):
|
|||||||
if code:
|
if code:
|
||||||
res = self._parse_sig_js(code)
|
res = self._parse_sig_js(code)
|
||||||
|
|
||||||
test_string = ''.join(map(compat_chr, range(len(example_sig))))
|
test_string = ''.join(map(chr, range(len(example_sig))))
|
||||||
cache_res = res(test_string)
|
cache_res = res(test_string)
|
||||||
cache_spec = [ord(c) for c in cache_res]
|
cache_spec = [ord(c) for c in cache_res]
|
||||||
|
|
||||||
@ -2522,7 +2521,7 @@ def _genslice(start, end, step):
|
|||||||
else:
|
else:
|
||||||
yield _genslice(start, i, step)
|
yield _genslice(start, i, step)
|
||||||
|
|
||||||
test_string = ''.join(map(compat_chr, range(len(example_sig))))
|
test_string = ''.join(map(chr, range(len(example_sig))))
|
||||||
cache_res = func(test_string)
|
cache_res = func(test_string)
|
||||||
cache_spec = [ord(c) for c in cache_res]
|
cache_spec = [ord(c) for c in cache_res]
|
||||||
expr_code = ' + '.join(gen_sig_code(cache_spec))
|
expr_code = ' + '.join(gen_sig_code(cache_spec))
|
||||||
@ -3421,7 +3420,7 @@ def _real_extract(self, url):
|
|||||||
# fields may contain comma as well (see
|
# fields may contain comma as well (see
|
||||||
# https://github.com/ytdl-org/youtube-dl/issues/8536)
|
# https://github.com/ytdl-org/youtube-dl/issues/8536)
|
||||||
feed_data = compat_parse_qs(
|
feed_data = compat_parse_qs(
|
||||||
compat_urllib_parse_unquote_plus(feed))
|
urllib.parse.unquote_plus(feed))
|
||||||
|
|
||||||
def feed_entry(name):
|
def feed_entry(name):
|
||||||
return try_get(
|
return try_get(
|
||||||
@ -5846,7 +5845,7 @@ def _real_extract(self, url):
|
|||||||
if params:
|
if params:
|
||||||
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
|
section = next((k for k, v in self._SECTIONS.items() if v == params), params)
|
||||||
else:
|
else:
|
||||||
section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
|
section = urllib.parse.unquote_plus((url.split('#') + [''])[1]).lower()
|
||||||
params = self._SECTIONS.get(section)
|
params = self._SECTIONS.get(section)
|
||||||
if not params:
|
if not params:
|
||||||
section = None
|
section = None
|
||||||
|
@@ -4,10 +4,11 @@
 import os.path
 import re
 import shlex
+import shutil
 import string
 import sys
 
-from .compat import compat_expanduser, compat_get_terminal_size, compat_getenv
+from .compat import compat_expanduser
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader.external import list_external_downloaders
 from .postprocessor import (
@@ -39,7 +40,7 @@ def parseOpts(overrideArguments=None, ignore_config_files='if_override'):
 
     def _readUserConf(package_name, default=[]):
         # .config
-        xdg_config_home = compat_getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
+        xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
         userConfFile = os.path.join(xdg_config_home, package_name, 'config')
         if not os.path.isfile(userConfFile):
             userConfFile = os.path.join(xdg_config_home, '%s.conf' % package_name)
@@ -48,7 +49,7 @@ def _readUserConf(package_name, default=[]):
             return userConf, userConfFile
 
         # appdata
-        appdata_dir = compat_getenv('appdata')
+        appdata_dir = os.getenv('appdata')
         if appdata_dir:
             userConfFile = os.path.join(appdata_dir, package_name, 'config')
             userConf = Config.read_file(userConfFile, default=None)
@@ -137,7 +138,7 @@ def load_configs():
 class _YoutubeDLHelpFormatter(optparse.IndentedHelpFormatter):
     def __init__(self):
         # No need to wrap help messages if we're on a wide console
-        max_width = compat_get_terminal_size().columns or 80
+        max_width = shutil.get_terminal_size().columns or 80
         # The % is chosen to get a pretty output in README.md
         super().__init__(width=max_width, max_help_position=int(0.45 * max_width))
 
yt_dlp/socks.py

@@ -8,8 +8,9 @@

 import collections
 import socket
+import struct

-from .compat import compat_ord, compat_struct_pack, compat_struct_unpack
+from .compat import compat_ord

 __author__ = 'Timo Schmid <coding@timoschmid.de>'

@@ -19,7 +20,7 @@
 # if the client cannot resolve the destination host's domain name to find its
 # IP address, it should set the first three bytes of DSTIP to NULL and the last
 # byte to a non-zero value.
-SOCKS4_DEFAULT_DSTIP = compat_struct_pack('!BBBB', 0, 0, 0, 0xFF)
+SOCKS4_DEFAULT_DSTIP = struct.pack('!BBBB', 0, 0, 0, 0xFF)

 SOCKS5_VERSION = 5
 SOCKS5_USER_AUTH_VERSION = 0x01
@@ -122,11 +123,11 @@ def recvall(self, cnt):

     def _recv_bytes(self, cnt):
         data = self.recvall(cnt)
-        return compat_struct_unpack(f'!{cnt}B', data)
+        return struct.unpack(f'!{cnt}B', data)

     @staticmethod
     def _len_and_data(data):
-        return compat_struct_pack('!B', len(data)) + data
+        return struct.pack('!B', len(data)) + data

     def _check_response_version(self, expected_version, got_version):
         if got_version != expected_version:
@@ -147,7 +148,7 @@ def _setup_socks4(self, address, is_4a=False):

         ipaddr = self._resolve_address(destaddr, SOCKS4_DEFAULT_DSTIP, use_remote_dns=is_4a)

-        packet = compat_struct_pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr
+        packet = struct.pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr

         username = (self._proxy.username or '').encode()
         packet += username + b'\x00'
@@ -157,7 +158,7 @@ def _setup_socks4(self, address, is_4a=False):

         self.sendall(packet)

-        version, resp_code, dstport, dsthost = compat_struct_unpack('!BBHI', self.recvall(8))
+        version, resp_code, dstport, dsthost = struct.unpack('!BBHI', self.recvall(8))

         self._check_response_version(SOCKS4_REPLY_VERSION, version)

@@ -171,14 +172,14 @@ def _setup_socks4a(self, address):
         self._setup_socks4(address, is_4a=True)

     def _socks5_auth(self):
-        packet = compat_struct_pack('!B', SOCKS5_VERSION)
+        packet = struct.pack('!B', SOCKS5_VERSION)

         auth_methods = [Socks5Auth.AUTH_NONE]
         if self._proxy.username and self._proxy.password:
             auth_methods.append(Socks5Auth.AUTH_USER_PASS)

-        packet += compat_struct_pack('!B', len(auth_methods))
-        packet += compat_struct_pack(f'!{len(auth_methods)}B', *auth_methods)
+        packet += struct.pack('!B', len(auth_methods))
+        packet += struct.pack(f'!{len(auth_methods)}B', *auth_methods)

         self.sendall(packet)

@@ -194,7 +195,7 @@ def _socks5_auth(self):
         if method == Socks5Auth.AUTH_USER_PASS:
             username = self._proxy.username.encode()
             password = self._proxy.password.encode()
-            packet = compat_struct_pack('!B', SOCKS5_USER_AUTH_VERSION)
+            packet = struct.pack('!B', SOCKS5_USER_AUTH_VERSION)
             packet += self._len_and_data(username) + self._len_and_data(password)
             self.sendall(packet)

@@ -214,14 +215,14 @@ def _setup_socks5(self, address):
         self._socks5_auth()

         reserved = 0
-        packet = compat_struct_pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved)
+        packet = struct.pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved)
         if ipaddr is None:
             destaddr = destaddr.encode()
-            packet += compat_struct_pack('!B', Socks5AddressType.ATYP_DOMAINNAME)
+            packet += struct.pack('!B', Socks5AddressType.ATYP_DOMAINNAME)
             packet += self._len_and_data(destaddr)
         else:
-            packet += compat_struct_pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr
-        packet += compat_struct_pack('!H', port)
+            packet += struct.pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr
+        packet += struct.pack('!H', port)

         self.sendall(packet)

@@ -240,7 +241,7 @@ def _setup_socks5(self, address):
             destaddr = self.recvall(alen)
         elif atype == Socks5AddressType.ATYP_IPV6:
             destaddr = self.recvall(16)
-        destport = compat_struct_unpack('!H', self.recvall(2))[0]
+        destport = struct.unpack('!H', self.recvall(2))[0]

         return (destaddr, destport)

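Note: compat_struct_pack and compat_struct_unpack were aliases of the stdlib
struct functions, so the substitutions above are behaviour-preserving. A
self-contained sketch of the SOCKS4 CONNECT header built in _setup_socks4
(the port and address values here are illustrative):

    import struct

    SOCKS4_VERSION, CMD_CONNECT, port = 4, 1, 1080
    ipaddr = struct.pack('!BBBB', 127, 0, 0, 1)  # IPv4 in network byte order
    packet = struct.pack('!BBH', SOCKS4_VERSION, CMD_CONNECT, port) + ipaddr
    assert packet == b'\x04\x01\x04\x38\x7f\x00\x00\x01'

    # and the reverse direction, as in the 8-byte reply parser above
    version, resp_code, dstport, dsthost = struct.unpack('!BBHI', packet)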
yt_dlp/utils.py (124 changed lines)
@@ -14,6 +14,8 @@
 import gzip
 import hashlib
 import hmac
+import html.entities
+import html.parser
 import importlib.util
 import io
 import itertools
@@ -29,6 +31,7 @@
 import shlex
 import socket
 import ssl
+import struct
 import subprocess
 import sys
 import tempfile
@@ -36,35 +39,27 @@
 import traceback
 import types
 import urllib.parse
+import urllib.request
 import xml.etree.ElementTree
 import zlib
+import http.client
+import http.cookiejar

 from .compat import asyncio, functools  # isort: split
 from .compat import (
-    compat_chr,
-    compat_cookiejar,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_html_entities,
-    compat_html_entities_html5,
     compat_HTMLParseError,
-    compat_HTMLParser,
-    compat_http_client,
     compat_HTTPError,
     compat_os_name,
     compat_parse_qs,
     compat_shlex_quote,
     compat_str,
-    compat_struct_pack,
-    compat_struct_unpack,
-    compat_urllib_error,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
 )
-from .dependencies import brotli, certifi, websockets
+from .dependencies import brotli, certifi, websockets, xattr
 from .socks import ProxyType, sockssocket

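Note: by this point in the migration, the names dropped from the .compat
import above were thin re-exports of stdlib objects, roughly of this shape
(a hypothetical sketch, not the actual contents of yt_dlp/compat.py):

    # compat.py-style shim: call sites can switch to the stdlib names
    # with no change in behaviour
    import struct
    import urllib.request

    compat_struct_pack = struct.pack
    compat_struct_unpack = struct.unpack
    compat_urllib_request = urllib.request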
@@ -445,7 +440,7 @@ def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value
     )


-class HTMLBreakOnClosingTagParser(compat_HTMLParser):
+class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
     """
     HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
     closing tag for the first opening tag it has encountered, and can be used
@@ -457,7 +452,7 @@ class HTMLBreakOnClosingTagException(Exception):

     def __init__(self):
         self.tagstack = collections.deque()
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)

     def __enter__(self):
         return self
@@ -522,22 +517,22 @@ def find_or_raise(haystack, needle, exc):
     raise compat_HTMLParseError('unexpected end of html')


-class HTMLAttributeParser(compat_HTMLParser):
+class HTMLAttributeParser(html.parser.HTMLParser):
     """Trivial HTML parser to gather the attributes for a single element"""

     def __init__(self):
         self.attrs = {}
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)

     def handle_starttag(self, tag, attrs):
         self.attrs = dict(attrs)


-class HTMLListAttrsParser(compat_HTMLParser):
+class HTMLListAttrsParser(html.parser.HTMLParser):
     """HTML parser to gather the attributes for the elements of a list"""

     def __init__(self):
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)
         self.items = []
         self._level = 0

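Note: the classes above now subclass html.parser.HTMLParser directly. A
self-contained sketch of the same attribute-gathering pattern that
HTMLAttributeParser uses:

    import html.parser

    class AttrGrabber(html.parser.HTMLParser):
        """Collect the attributes of the last start tag fed to the parser."""

        def __init__(self):
            html.parser.HTMLParser.__init__(self)
            self.attrs = {}

        def handle_starttag(self, tag, attrs):
            self.attrs = dict(attrs)

    parser = AttrGrabber()
    parser.feed('<a href="https://example.com" id="x">link</a>')
    print(parser.attrs)  # {'href': 'https://example.com', 'id': 'x'}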
@@ -763,7 +758,7 @@ def sanitized_Request(url, *args, **kwargs):
     if auth_header is not None:
         headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
         headers['Authorization'] = auth_header
-    return compat_urllib_request.Request(url, *args, **kwargs)
+    return urllib.request.Request(url, *args, **kwargs)


 def expand_path(s):
@@ -788,13 +783,13 @@ def _htmlentity_transform(entity_with_semicolon):
     entity = entity_with_semicolon[:-1]

     # Known non-numeric HTML entity
-    if entity in compat_html_entities.name2codepoint:
-        return compat_chr(compat_html_entities.name2codepoint[entity])
+    if entity in html.entities.name2codepoint:
+        return chr(html.entities.name2codepoint[entity])

     # TODO: HTML5 allows entities without a semicolon. For example,
     # '&Eacuteric' should be decoded as 'Éric'.
-    if entity_with_semicolon in compat_html_entities_html5:
-        return compat_html_entities_html5[entity_with_semicolon]
+    if entity_with_semicolon in html.entities.html5:
+        return html.entities.html5[entity_with_semicolon]

     mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
     if mobj is not None:
@@ -806,7 +801,7 @@ def _htmlentity_transform(entity_with_semicolon):
         base = 10
     # See https://github.com/ytdl-org/youtube-dl/issues/7518
     with contextlib.suppress(ValueError):
-        return compat_chr(int(numstr, base))
+        return chr(int(numstr, base))

     # Unknown entity in name, return its literal representation
     return '&%s;' % entity
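Note: the two stdlib tables used above behave as follows:

    import html.entities

    # name2codepoint maps bare entity names to Unicode code points
    print(chr(html.entities.name2codepoint['eacute']))  # 'é'
    print(chr(html.entities.name2codepoint['amp']))     # '&'

    # html5 maps the full entity, semicolon included, straight to text
    print(html.entities.html5['eacute;'])               # 'é'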
@@ -1015,7 +1010,7 @@ def __init__(self, msg=None):
         super().__init__(self.msg)


-network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
+network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
 if hasattr(ssl, 'CertificateError'):
     network_exceptions.append(ssl.CertificateError)
 network_exceptions = tuple(network_exceptions)
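Note: a sketch of how a tuple like network_exceptions is typically consumed
(the URL is illustrative). On Python 3, socket.error is an alias of OSError,
so the tuple is broader than it looks:

    import http.client
    import socket
    import urllib.error
    import urllib.request

    network_exceptions = (urllib.error.URLError, http.client.HTTPException, socket.error)

    try:
        urllib.request.urlopen('https://example.com', timeout=10)
    except network_exceptions as e:
        print(f'network error, may be worth retrying: {e}')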
@@ -1267,7 +1262,7 @@ def handle_youtubedl_headers(headers):
     return filtered_headers


-class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
+class YoutubeDLHandler(urllib.request.HTTPHandler):
     """Handler for HTTP requests and responses.

     This class, when installed with an OpenerDirector, automatically adds
@@ -1286,11 +1281,11 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """

     def __init__(self, params, *args, **kwargs):
-        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
+        urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
         self._params = params

     def http_open(self, req):
-        conn_class = compat_http_client.HTTPConnection
+        conn_class = http.client.HTTPConnection

         socks_proxy = req.headers.get('Ytdl-socks-proxy')
         if socks_proxy:
@@ -1365,18 +1360,18 @@ def http_response(self, req, resp):
                     break
                 else:
                     raise original_ioerror
-            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
+            resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
         # deflate
         if resp.headers.get('Content-encoding', '') == 'deflate':
             gz = io.BytesIO(self.deflate(resp.read()))
-            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
+            resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
         # brotli
         if resp.headers.get('Content-encoding', '') == 'br':
-            resp = compat_urllib_request.addinfourl(
+            resp = urllib.request.addinfourl(
                 io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
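Note: urllib.request.addinfourl (re-exported from urllib.response) wraps a
file-like object so it can stand in for the original response, which is how
the decompression branches above rebuild resp. A minimal sketch under that
assumption:

    import io
    import urllib.request

    def rewrap(old_resp, new_body):
        # Wrap the decompressed bytes while keeping the original headers,
        # URL and status code
        resp = urllib.request.addinfourl(
            io.BytesIO(new_body), old_resp.headers, old_resp.url, old_resp.code)
        resp.msg = old_resp.msg
        return resp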
@@ -1399,7 +1394,7 @@ def http_response(self, req, resp):

 def make_socks_conn_class(base_class, socks_proxy):
     assert issubclass(base_class, (
-        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
+        http.client.HTTPConnection, http.client.HTTPSConnection))

     url_components = compat_urlparse.urlparse(socks_proxy)
     if url_components.scheme.lower() == 'socks5':
@@ -1412,7 +1407,7 @@ def make_socks_conn_class(base_class, socks_proxy):
     def unquote_if_non_empty(s):
         if not s:
             return s
-        return compat_urllib_parse_unquote_plus(s)
+        return urllib.parse.unquote_plus(s)

     proxy_args = (
         socks_type,
@@ -1430,7 +1425,7 @@ def connect(self):
             self.sock.settimeout(self.timeout)
             self.sock.connect((self.host, self.port))

-            if isinstance(self, compat_http_client.HTTPSConnection):
+            if isinstance(self, http.client.HTTPSConnection):
                 if hasattr(self, '_context'):  # Python > 2.6
                     self.sock = self._context.wrap_socket(
                         self.sock, server_hostname=self.host)
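Note: urllib.parse.unquote_plus is the stdlib function behind the removed
compat_urllib_parse_unquote_plus alias; it decodes percent-escapes and also
treats '+' as a space, which suits credentials embedded in a proxy URL:

    import urllib.parse

    print(urllib.parse.unquote_plus('user%40host'))   # user@host
    print(urllib.parse.unquote_plus('pass+word%21'))  # pass word!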
@@ -1440,10 +1435,10 @@ def connect(self):
     return SocksConnection


-class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
+class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
     def __init__(self, params, https_conn_class=None, *args, **kwargs):
-        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
-        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
+        urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
+        self._https_conn_class = https_conn_class or http.client.HTTPSConnection
         self._params = params

     def https_open(self, req):
@@ -1470,7 +1465,7 @@ def https_open(self, req):
             raise


-class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
+class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
     """
     See [1] for cookie file format.

@@ -1541,7 +1536,7 @@ def save(self, filename=None, *args, **kwargs):
         if self.filename is not None:
             filename = self.filename
         else:
-            raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+            raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)

         # Store session cookies with `expires` set to 0 instead of an empty string
         for cookie in self:
@@ -1558,7 +1553,7 @@ def load(self, filename=None, ignore_discard=False, ignore_expires=False):
         if self.filename is not None:
             filename = self.filename
         else:
-            raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+            raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)

         def prepare_line(line):
             if line.startswith(self._HTTPONLY_PREFIX):
@@ -1568,10 +1563,10 @@ def prepare_line(line):
                 return line
             cookie_list = line.split('\t')
             if len(cookie_list) != self._ENTRY_LEN:
-                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
+                raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
             cookie = self._CookieFileEntry(*cookie_list)
             if cookie.expires_at and not cookie.expires_at.isdigit():
-                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+                raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
             return line

         cf = io.StringIO()
@@ -1579,9 +1574,9 @@ def prepare_line(line):
             for line in f:
                 try:
                     cf.write(prepare_line(line))
-                except compat_cookiejar.LoadError as e:
+                except http.cookiejar.LoadError as e:
                     if f'{line.strip()} '[0] in '[{"':
-                        raise compat_cookiejar.LoadError(
+                        raise http.cookiejar.LoadError(
                             'Cookies file must be Netscape formatted, not JSON. See '
                             'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
                     write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
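Note: http.cookiejar.MozillaCookieJar is the stdlib class behind the removed
compat_cookiejar alias. Basic usage, with an illustrative file name:

    import http.cookiejar

    jar = http.cookiejar.MozillaCookieJar('cookies.txt')
    try:
        jar.load(ignore_discard=True, ignore_expires=True)
    except http.cookiejar.LoadError as e:
        print(f'not a Netscape-format cookie file: {e}')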
@@ -1604,18 +1599,18 @@ def prepare_line(line):
             cookie.discard = True


-class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
+class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
     def __init__(self, cookiejar=None):
-        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
+        urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)

     def http_response(self, request, response):
-        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
+        return urllib.request.HTTPCookieProcessor.http_response(self, request, response)

-    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
+    https_request = urllib.request.HTTPCookieProcessor.http_request
     https_response = http_response


-class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
+class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
     """YoutubeDL redirect handler

     The code is based on HTTPRedirectHandler implementation from CPython [1].
@@ -1630,7 +1625,7 @@ class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
     3. https://github.com/ytdl-org/youtube-dl/issues/28768
     """

-    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
+    http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302

     def redirect_request(self, req, fp, code, msg, headers, newurl):
         """Return a Request or None in response to a redirect.
@@ -1672,7 +1667,7 @@ def redirect_request(self, req, fp, code, msg, headers, newurl):
         if code in (301, 302) and m == 'POST':
             m = 'GET'

-        return compat_urllib_request.Request(
+        return urllib.request.Request(
             newurl, headers=newheaders, origin_req_host=req.origin_req_host,
             unverifiable=True, method=m)

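Note: handlers like the ones defined in this file plug into a standard
OpenerDirector. A rough sketch of the wiring (the empty params dict is
illustrative; the real code passes the YoutubeDL options):

    import urllib.request

    cookiejar = YoutubeDLCookieJar()
    opener = urllib.request.build_opener(
        YoutubeDLCookieProcessor(cookiejar),
        YoutubeDLRedirectHandler(),
        YoutubeDLHandler({}),
    )
    response = opener.open('https://example.com')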
@@ -1967,7 +1962,7 @@ def bytes_to_intlist(bs):
 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    return compat_struct_pack('%dB' % len(xs), *xs)
+    return struct.pack('%dB' % len(xs), *xs)


 class LockingUnsupportedError(OSError):
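Note: for reference, the pack format above fans a list of byte values out as
individual unsigned chars; on Python 3, bytes(xs) is equivalent:

    import struct

    xs = [104, 105, 33]
    assert struct.pack('%dB' % len(xs), *xs) == b'hi!'
    assert bytes(xs) == b'hi!'  # the simpler modern spelling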
@@ -2427,12 +2422,12 @@ def urljoin(base, path):
     return compat_urlparse.urljoin(base, path)


-class HEADRequest(compat_urllib_request.Request):
+class HEADRequest(urllib.request.Request):
     def get_method(self):
         return 'HEAD'


-class PUTRequest(compat_urllib_request.Request):
+class PUTRequest(urllib.request.Request):
     def get_method(self):
         return 'PUT'

@@ -2484,7 +2479,7 @@ def url_or_none(url):


 def request_to_url(req):
-    if isinstance(req, compat_urllib_request.Request):
+    if isinstance(req, urllib.request.Request):
         return req.get_full_url()
     else:
         return req
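Note: overriding get_method() is the long-standing way to force a verb on
urllib.request.Request; since Python 3.3 the method= keyword does the same:

    import urllib.request

    req = HEADRequest('https://example.com')  # class defined above
    assert req.get_method() == 'HEAD'

    # equivalent without the subclass, on Python 3.3+
    req2 = urllib.request.Request('https://example.com', method='HEAD')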
@@ -3037,7 +3032,7 @@ def update_Request(req, url=None, data=None, headers={}, query={}):
     elif req_get_method == 'PUT':
         req_type = PUTRequest
     else:
-        req_type = compat_urllib_request.Request
+        req_type = urllib.request.Request
     new_req = req_type(
         req_url, data=req_data, headers=req_headers,
         origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
@@ -4636,20 +4631,20 @@ def random_ipv4(cls, code_or_block):
         else:
             block = code_or_block
         addr, preflen = block.split('/')
-        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
+        addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
         addr_max = addr_min | (0xffffffff >> int(preflen))
         return compat_str(socket.inet_ntoa(
-            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
+            struct.pack('!L', random.randint(addr_min, addr_max))))


-class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
+class PerRequestProxyHandler(urllib.request.ProxyHandler):
     def __init__(self, proxies=None):
         # Set default handlers
         for type in ('http', 'https'):
             setattr(self, '%s_open' % type,
                     lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                         meth(r, proxy, type))
-        compat_urllib_request.ProxyHandler.__init__(self, proxies)
+        urllib.request.ProxyHandler.__init__(self, proxies)

     def proxy_open(self, req, proxy, type):
         req_proxy = req.headers.get('Ytdl-request-proxy')
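Note: the inet_aton/struct round trip above is the usual trick for doing
arithmetic on IPv4 addresses. A self-contained sketch picking a random host
in a documentation /24 block:

    import random
    import socket
    import struct

    addr, preflen = '203.0.113.0/24'.split('/')
    addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
    addr_max = addr_min | (0xffffffff >> int(preflen))
    print(socket.inet_ntoa(struct.pack('!L', random.randint(addr_min, addr_max))))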
@@ -4663,7 +4658,7 @@ def proxy_open(self, req, proxy, type):
             req.add_header('Ytdl-socks-proxy', proxy)
             # yt-dlp's http/https handlers do wrapping the socket with socks
             return None
-        return compat_urllib_request.ProxyHandler.proxy_open(
+        return urllib.request.ProxyHandler.proxy_open(
             self, req, proxy, type)

@@ -4683,7 +4678,7 @@ def long_to_bytes(n, blocksize=0):
     s = b''
     n = int(n)
     while n > 0:
-        s = compat_struct_pack('>I', n & 0xffffffff) + s
+        s = struct.pack('>I', n & 0xffffffff) + s
         n = n >> 32
     # strip off leading zeros
     for i in range(len(s)):
@@ -4714,7 +4709,7 @@ def bytes_to_long(s):
         s = b'\000' * extra + s
         length = length + extra
     for i in range(0, length, 4):
-        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
+        acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
     return acc

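Note: these helpers predate int.to_bytes/int.from_bytes; a sketch showing the
32-bit-chunk loop above agrees with the builtin once leading zeros are
stripped:

    import struct

    n = 0x1_0000_0001
    s = b''
    while n > 0:
        s = struct.pack('>I', n & 0xffffffff) + s
        n >>= 32
    assert s.lstrip(b'\0') == (0x1_0000_0001).to_bytes(8, 'big').lstrip(b'\0')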
@@ -4842,7 +4837,7 @@ def decode_png(png_data):
         raise OSError('Not a valid PNG file.')

     int_map = {1: '>B', 2: '>H', 4: '>I'}
-    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
+    unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]

     chunks = []

@@ -4954,7 +4949,6 @@ def write_xattr(path, key, value):
         return

     # UNIX Method 1. Use xattrs/pyxattrs modules
-    from .dependencies import xattr

     setxattr = None
     if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':