Compare commits


No commits in common. "14f25df2b6233553e968df023430ca96c0b1df9f" and "379a4f161d4ad3e40932dcf5aca6e6fb9715ab28" have entirely different histories.

109 changed files with 779 additions and 738 deletions


@ -1,12 +1,9 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import yt_dlp
BASH_COMPLETION_FILE = "completions/bash/yt-dlp"


@ -13,12 +13,10 @@ import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import urllib.parse
import urllib.request
from test.helper import gettestcases
from yt_dlp.utils import compat_urllib_parse_urlparse, compat_urllib_request
if len(sys.argv) > 1:
METHOD = 'LIST'
LIST = open(sys.argv[1]).read().decode('utf8').strip()
@ -28,7 +26,7 @@ else:
for test in gettestcases():
if METHOD == 'EURISTIC':
try:
webpage = urllib.request.urlopen(test['url'], timeout=10).read()
webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
except Exception:
print('\nFail: {}'.format(test['name']))
continue
@ -38,7 +36,7 @@ for test in gettestcases():
RESULT = 'porn' in webpage.lower()
elif METHOD == 'LIST':
domain = urllib.parse.urlparse(test['url']).netloc
domain = compat_urllib_parse_urlparse(test['url']).netloc
if not domain:
print('\nFail: {}'.format(test['name']))
continue


@ -1,14 +1,10 @@
#!/usr/bin/env python3
# Allow direct execution
import optparse
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import optparse
import yt_dlp
from yt_dlp.utils import shell_quote


@ -1,15 +1,11 @@
#!/usr/bin/env python3
# Allow direct execution
import codecs
import os
import subprocess
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import codecs
import subprocess
from yt_dlp.aes import aes_encrypt, key_expansion
from yt_dlp.utils import intlist_to_bytes


@ -1,5 +1,4 @@
#!/usr/bin/env python3
import optparse
import re


@ -1,12 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import optparse
@ -15,7 +7,7 @@ def read(fname):
return f.read()
# Get the version without importing the package
# Get the version from yt_dlp/version.py without importing the package
def read_version(fname):
exec(compile(read(fname), fname, 'exec'))
return locals()['__version__']
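
A standalone sketch of the same exec-based trick, hedged rather than authoritative: it passes an explicit namespace instead of relying on exec() mutating locals() (a fragile CPython detail), and the default path is illustrative.

def read_version_standalone(fname='yt_dlp/version.py'):
    # Compile and run version.py in an isolated namespace so that
    # the yt_dlp package itself is never imported.
    with open(fname, encoding='utf-8') as f:
        code = compile(f.read(), fname, 'exec')
    namespace = {}
    exec(code, namespace)
    return namespace['__version__']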


@ -1,15 +1,12 @@
#!/usr/bin/env python3
# Allow direct execution
import optparse
import os
import sys
from inspect import getsource
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import optparse
from inspect import getsource
NO_ATTR = object()
STATIC_CLASS_PROPERTIES = ['IE_NAME', 'IE_DESC', 'SEARCH_KEY', '_WORKING', '_NETRC_MACHINE', 'age_limit']
CLASS_METHODS = [


@ -1,11 +1,7 @@
#!/usr/bin/env python3
"""
yt-dlp --help | make_readme.py
This must be run in a console of correct width
"""
# yt-dlp --help | make_readme.py
# This must be run in a console of correct width
import functools
import re
import sys


@ -1,14 +1,10 @@
#!/usr/bin/env python3
# Allow direct execution
import optparse
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import optparse
from yt_dlp.extractor import list_extractor_classes


@ -1,5 +1,4 @@
#!/usr/bin/env python3
import optparse
import os.path
import re
@ -24,7 +23,7 @@ yt\-dlp \- A youtube-dl fork with additional features and patches
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
_, args = parser.parse_args()
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expected an output filename')


@ -1,15 +1,12 @@
#!/usr/bin/env python3
# Allow direct execution
import json
import os
import re
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import json
import re
import urllib.request
from yt_dlp.compat import compat_urllib_request
# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
# version can be either 0-aligned (yt-dlp version) or normalized (PyPI version)
@ -18,7 +15,7 @@ filename, version = sys.argv[1:]
normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
pypi_release = json.loads(urllib.request.urlopen(
pypi_release = json.loads(compat_urllib_request.urlopen(
'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
).read().decode())


@ -1,12 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
import sys
from datetime import datetime


@ -1,12 +1,9 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import yt_dlp
ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"


@ -1,5 +1,4 @@
#!/usr/bin/env python3
import os
import platform
import sys


@ -37,5 +37,3 @@ line_length = 80
reverse_relative = true
ensure_newline_before_comments = true
include_trailing_comma = true
known_first_party =
test


@ -1,5 +1,4 @@
#!/usr/bin/env python3
import os.path
import sys
import warnings


@ -9,7 +9,7 @@ import types
import yt_dlp.extractor
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_os_name
from yt_dlp.compat import compat_os_name, compat_str
from yt_dlp.utils import preferredencoding, write_string
if 'pytest' in sys.modules:
@ -96,29 +96,29 @@ md5 = lambda s: hashlib.md5(s.encode()).hexdigest()
def expect_value(self, got, expected, field):
if isinstance(expected, str) and expected.startswith('re:'):
if isinstance(expected, compat_str) and expected.startswith('re:'):
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, str),
f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
isinstance(got, compat_str),
f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
self.assertTrue(
match_rex.match(got),
f'field {field} (value: {got!r}) should match {match_str!r}')
elif isinstance(expected, str) and expected.startswith('startswith:'):
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
start_str = expected[len('startswith:'):]
self.assertTrue(
isinstance(got, str),
f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
isinstance(got, compat_str),
f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
self.assertTrue(
got.startswith(start_str),
f'field {field} (value: {got!r}) should start with {start_str!r}')
elif isinstance(expected, str) and expected.startswith('contains:'):
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
contains_str = expected[len('contains:'):]
self.assertTrue(
isinstance(got, str),
f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
isinstance(got, compat_str),
f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
self.assertTrue(
contains_str in got,
f'field {field} (value: {got!r}) should contain {contains_str!r}')
@ -142,12 +142,12 @@ def expect_value(self, got, expected, field):
index, field, type_expected, type_got))
expect_value(self, item_got, item_expected, field)
else:
if isinstance(expected, str) and expected.startswith('md5:'):
if isinstance(expected, compat_str) and expected.startswith('md5:'):
self.assertTrue(
isinstance(got, str),
isinstance(got, compat_str),
f'Expected field {field} to be a unicode object, but got value {got!r} of type {type(got)!r}')
got = 'md5:' + md5(got)
elif isinstance(expected, str) and re.match(r'^(?:min|max)?count:\d+', expected):
elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
self.assertTrue(
isinstance(got, (list, dict)),
f'Expected field {field} to be a list or a dict, but it is of type {type(got).__name__}')
@ -236,7 +236,7 @@ def expect_info_dict(self, got_dict, expected_dict):
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, str):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
elif isinstance(v, type):
return v.__name__


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,12 +6,10 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import http.server
import threading
from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
from yt_dlp.compat import compat_etree_fromstring
from yt_dlp.compat import compat_etree_fromstring, compat_http_server
from yt_dlp.extractor import YoutubeIE, get_info_extractor
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
@ -26,7 +23,7 @@ TEAPOT_RESPONSE_STATUS = 418
TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler):
class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
@ -1658,7 +1655,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
# or the underlying `_download_webpage_handle` returning no content
# when a response matches `expected_status`.
httpd = http.server.HTTPServer(
httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), InfoExtractorTestRequestHandler)
port = http_server_port(httpd)
server_thread = threading.Thread(target=httpd.serve_forever)


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,14 +6,17 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
import json
import urllib.error
from test.helper import FakeYDL, assertRegexpMatches
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_os_name
from yt_dlp.compat import (
compat_os_name,
compat_setenv,
compat_str,
compat_urllib_error,
)
from yt_dlp.extractor import YoutubeIE
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.postprocessor.common import PostProcessor
@ -839,14 +841,14 @@ class TestYoutubeDL(unittest.TestCase):
# test('%(foo|)s', ('', '_')) # fixme
# Environment variable expansion for prepare_filename
os.environ['__yt_dlp_var'] = 'expanded'
compat_setenv('__yt_dlp_var', 'expanded')
envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
test(envvar, (envvar, 'expanded'))
if compat_os_name == 'nt':
test('%s%', ('%s%', '%s%'))
os.environ['s'] = 'expanded'
compat_setenv('s', 'expanded')
test('%s%', ('%s%', 'expanded')) # %s% should be expanded before escaping %s
os.environ['(test)s'] = 'expanded'
compat_setenv('(test)s', 'expanded')
test('%(test)s%', ('NA%', 'expanded')) # Environment should take priority over template
# Path expansion and escaping
@ -1099,7 +1101,7 @@ class TestYoutubeDL(unittest.TestCase):
def test_urlopen_no_file_protocol(self):
# see https://github.com/ytdl-org/youtube-dl/issues/8227
ydl = YDL()
self.assertRaises(urllib.error.URLError, ydl.urlopen, 'file:///etc/passwd')
self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
def test_do_not_override_ie_key_in_url_transparent(self):
ydl = YDL()
@ -1185,7 +1187,7 @@ class TestYoutubeDL(unittest.TestCase):
def _entries(self):
for n in range(3):
video_id = str(n)
video_id = compat_str(n)
yield {
'_type': 'url_transparent',
'ie_key': VideoIE.ie_key(),


@ -1,16 +1,12 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import re
import sys
import tempfile
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import re
import tempfile
from yt_dlp.utils import YoutubeDLCookieJar


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,7 +6,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import base64
from yt_dlp.aes import (


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,8 +6,8 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import is_download_test, try_rm
from yt_dlp import YoutubeDL


@ -1,6 +1,6 @@
#!/usr/bin/env python3
# Allow direct execution
import collections
import os
import sys
import unittest
@ -8,9 +8,8 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import collections
from test.helper import gettestcases
from yt_dlp.extractor import FacebookIE, YoutubeIE, gen_extractors


@ -1,16 +1,15 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import shutil
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import shutil
from test.helper import FakeYDL
from yt_dlp.cache import Cache


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -8,14 +7,16 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import struct
import urllib.parse
from yt_dlp import compat
from yt_dlp.compat import (
compat_etree_fromstring,
compat_expanduser,
compat_getenv,
compat_setenv,
compat_str,
compat_struct_unpack,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode,
)
@ -25,19 +26,28 @@ class TestCompat(unittest.TestCase):
with self.assertWarns(DeprecationWarning):
compat.compat_basestring
with self.assertWarns(DeprecationWarning):
compat.WINDOWS_VT_MODE
compat.asyncio.events # Must not raise error
def test_compat_getenv(self):
test_str = 'тест'
compat_setenv('yt_dlp_COMPAT_GETENV', test_str)
self.assertEqual(compat_getenv('yt_dlp_COMPAT_GETENV'), test_str)
def test_compat_setenv(self):
test_var = 'yt_dlp_COMPAT_SETENV'
test_str = 'тест'
compat_setenv(test_var, test_str)
compat_getenv(test_var)
self.assertEqual(compat_getenv(test_var), test_str)
def test_compat_expanduser(self):
old_home = os.environ.get('HOME')
test_str = R'C:\Documents and Settings\тест\Application Data'
try:
os.environ['HOME'] = test_str
compat_setenv('HOME', test_str)
self.assertEqual(compat_expanduser('~'), test_str)
finally:
os.environ['HOME'] = old_home or ''
compat_setenv('HOME', old_home or '')
def test_compat_urllib_parse_unquote(self):
self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
@ -59,8 +69,8 @@ class TestCompat(unittest.TestCase):
'''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
def test_compat_urllib_parse_unquote_plus(self):
self.assertEqual(urllib.parse.unquote_plus('abc%20def'), 'abc def')
self.assertEqual(urllib.parse.unquote_plus('%7e/abc+def'), '~/abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
def test_compat_urllib_parse_urlencode(self):
self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
@ -81,11 +91,11 @@ class TestCompat(unittest.TestCase):
</root>
'''
doc = compat_etree_fromstring(xml.encode())
self.assertTrue(isinstance(doc.attrib['foo'], str))
self.assertTrue(isinstance(doc.attrib['spam'], str))
self.assertTrue(isinstance(doc.find('normal').text, str))
self.assertTrue(isinstance(doc.find('chinese').text, str))
self.assertTrue(isinstance(doc.find('foo/bar').text, str))
self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
self.assertTrue(isinstance(doc.find('normal').text, compat_str))
self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
def test_compat_etree_fromstring_doctype(self):
xml = '''<?xml version="1.0"?>
@ -94,7 +104,7 @@ class TestCompat(unittest.TestCase):
compat_etree_fromstring(xml)
def test_struct_unpack(self):
self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
if __name__ == '__main__':


@ -1,19 +1,14 @@
#!/usr/bin/env python3
# Allow direct execution
import hashlib
import json
import os
import socket
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import hashlib
import http.client
import json
import socket
import urllib.error
from test.helper import (
assertGreaterEqual,
expect_info_dict,
@ -25,7 +20,12 @@ from test.helper import (
try_rm,
)
import yt_dlp.YoutubeDL # isort: split
import yt_dlp.YoutubeDL
from yt_dlp.compat import (
compat_http_client,
compat_HTTPError,
compat_urllib_error,
)
from yt_dlp.extractor import get_info_extractor
from yt_dlp.utils import (
DownloadError,
@ -167,7 +167,7 @@ def generator(test_case, tname):
force_generic_extractor=params.get('force_generic_extractor', False))
except (DownloadError, ExtractorError) as err:
# Check if the exception is not a network related one
if not err.exc_info[0] in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine) or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503):
if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
raise
if try_num == RETRIES:


@ -1,19 +1,17 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import re
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import http.server
import re
import threading
from test.helper import http_server_port, try_rm
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_http_server
from yt_dlp.downloader.http import HttpFD
from yt_dlp.utils import encodeFilename
@ -23,7 +21,7 @@ TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_SIZE = 10 * 1024
class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
@ -80,7 +78,7 @@ class FakeLogger:
class TestHttpFD(unittest.TestCase):
def setUp(self):
self.httpd = http.server.HTTPServer(
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)


@ -1,16 +1,12 @@
#!/usr/bin/env python3
# Allow direct execution
import contextlib
import os
import subprocess
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import contextlib
import subprocess
from yt_dlp.utils import encodeArgument
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,19 +6,17 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import http.server
import ssl
import threading
import urllib.request
from test.helper import http_server_port
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_http_server, compat_urllib_request
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args):
pass
@ -56,7 +53,7 @@ class FakeLogger:
class TestHTTP(unittest.TestCase):
def setUp(self):
self.httpd = http.server.HTTPServer(
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
@ -67,7 +64,7 @@ class TestHTTP(unittest.TestCase):
class TestHTTPS(unittest.TestCase):
def setUp(self):
certfn = os.path.join(TEST_DIR, 'testcert.pem')
self.httpd = http.server.HTTPServer(
self.httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(certfn, None)
@ -93,7 +90,7 @@ class TestClientCert(unittest.TestCase):
certfn = os.path.join(TEST_DIR, 'testcert.pem')
self.certdir = os.path.join(TEST_DIR, 'testdata', 'certificate')
cacertfn = os.path.join(self.certdir, 'ca.crt')
self.httpd = http.server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
self.httpd = compat_http_server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.verify_mode = ssl.CERT_REQUIRED
sslctx.load_verify_locations(cafile=cacertfn)
@ -133,7 +130,7 @@ class TestClientCert(unittest.TestCase):
def _build_proxy_handler(name):
class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
proxy_name = name
def log_message(self, format, *args):
@ -149,14 +146,14 @@ def _build_proxy_handler(name):
class TestProxy(unittest.TestCase):
def setUp(self):
self.proxy = http.server.HTTPServer(
self.proxy = compat_http_server.HTTPServer(
('127.0.0.1', 0), _build_proxy_handler('normal'))
self.port = http_server_port(self.proxy)
self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
self.proxy_thread.daemon = True
self.proxy_thread.start()
self.geo_proxy = http.server.HTTPServer(
self.geo_proxy = compat_http_server.HTTPServer(
('127.0.0.1', 0), _build_proxy_handler('geo'))
self.geo_port = http_server_port(self.geo_proxy)
self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
@ -173,7 +170,7 @@ class TestProxy(unittest.TestCase):
response = ydl.urlopen(url).read().decode()
self.assertEqual(response, f'normal: {url}')
req = urllib.request.Request(url)
req = compat_urllib_request.Request(url)
req.add_header('Ytdl-request-proxy', geo_proxy)
response = ydl.urlopen(req).read().decode()
self.assertEqual(response, f'geo: {url}')


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,8 +6,8 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, is_download_test
from yt_dlp.extractor import IqiyiIE


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,7 +6,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from yt_dlp.jsinterp import JSInterpreter


@ -1,6 +1,3 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest


@ -1,15 +1,11 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import subprocess
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
from test.helper import is_download_test, try_rm
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


@ -1,15 +1,13 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, is_download_test, try_rm
import yt_dlp.YoutubeDL # isort: split
import yt_dlp.YoutubeDL
from yt_dlp.utils import DownloadError


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,7 +6,6 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from yt_dlp import YoutubeDL
from yt_dlp.compat import compat_shlex_quote
from yt_dlp.postprocessor import (


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,13 +6,12 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import random
import subprocess
import urllib.request
from test.helper import FakeYDL, get_params, is_download_test
from yt_dlp.compat import compat_str, compat_urllib_request
@is_download_test
class TestMultipleSocks(unittest.TestCase):
@ -53,7 +51,7 @@ class TestMultipleSocks(unittest.TestCase):
if params is None:
return
ydl = FakeYDL()
req = urllib.request.Request('http://yt-dl.org/ip')
req = compat_urllib_request.Request('http://yt-dl.org/ip')
req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
self.assertEqual(
ydl.urlopen(req).read().decode(),
@ -64,7 +62,7 @@ class TestMultipleSocks(unittest.TestCase):
if params is None:
return
ydl = FakeYDL()
req = urllib.request.Request('https://yt-dl.org/ip')
req = compat_urllib_request.Request('https://yt-dl.org/ip')
req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
self.assertEqual(
ydl.urlopen(req).read().decode(),
@ -101,13 +99,13 @@ class TestSocks(unittest.TestCase):
return ydl.urlopen('http://yt-dl.org/ip').read().decode()
def test_socks4(self):
self.assertTrue(isinstance(self._get_ip('socks4'), str))
self.assertTrue(isinstance(self._get_ip('socks4'), compat_str))
def test_socks4a(self):
self.assertTrue(isinstance(self._get_ip('socks4a'), str))
self.assertTrue(isinstance(self._get_ip('socks4a'), compat_str))
def test_socks5(self):
self.assertTrue(isinstance(self._get_ip('socks5'), str))
self.assertTrue(isinstance(self._get_ip('socks5'), compat_str))
if __name__ == '__main__':


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,8 +6,8 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, is_download_test, md5
from yt_dlp.extractor import (
NPOIE,
NRKTVIE,


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys


@ -1,6 +1,6 @@
#!/usr/bin/env python3
# Allow direct execution
import contextlib
import os
import sys
import unittest
@ -8,16 +8,19 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import contextlib
# Various small unit tests
import io
import itertools
import json
import xml.etree.ElementTree
from yt_dlp.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_HTMLParseError,
compat_os_name,
compat_setenv,
)
from yt_dlp.utils import (
Config,
@ -263,20 +266,20 @@ class TestUtil(unittest.TestCase):
def env(var):
return f'%{var}%' if sys.platform == 'win32' else f'${var}'
os.environ['yt_dlp_EXPATH_PATH'] = 'expanded'
compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
old_home = os.environ.get('HOME')
test_str = R'C:\Documents and Settings\тест\Application Data'
try:
os.environ['HOME'] = test_str
self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
self.assertEqual(expand_path('~'), os.getenv('HOME'))
compat_setenv('HOME', test_str)
self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
self.assertEqual(expand_path('~'), compat_getenv('HOME'))
self.assertEqual(
expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
'%s/expanded' % os.getenv('HOME'))
'%s/expanded' % compat_getenv('HOME'))
finally:
os.environ['HOME'] = old_home or ''
compat_setenv('HOME', old_home or '')
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
@ -1125,7 +1128,7 @@ class TestUtil(unittest.TestCase):
self.assertEqual(extract_attributes('<e x="décompose&#769;">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
chr(0x10000)
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False


@ -1,15 +1,11 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import subprocess
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,12 +6,11 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import xml.etree.ElementTree
from test.helper import get_params, is_download_test, try_rm
import yt_dlp.extractor
import yt_dlp.YoutubeDL
from test.helper import get_params, is_download_test, try_rm
class YoutubeDL(yt_dlp.YoutubeDL):


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
@ -7,8 +6,8 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, is_download_test
from yt_dlp.extractor import YoutubeIE, YoutubeTabIE


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Allow direct execution
import os
import sys


@ -1,19 +1,18 @@
#!/usr/bin/env python3
# Allow direct execution
import contextlib
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import contextlib
import re
import string
import urllib.request
from test.helper import FakeYDL, is_download_test
from yt_dlp.compat import compat_str
from yt_dlp.extractor import YoutubeIE
from yt_dlp.jsinterp import JSInterpreter
@ -158,7 +157,7 @@ def t_factory(name, sig_func, url_pattern):
def signature(jscode, sig_input):
func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
src_sig = (
str(string.printable[:sig_input])
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
return func(src_sig)


@ -1,3 +1,4 @@
#!/usr/bin/env python3
import collections
import contextlib
import datetime
@ -25,8 +26,15 @@ import urllib.request
from string import ascii_letters
from .cache import Cache
from .compat import HAS_LEGACY as compat_has_legacy
from .compat import compat_os_name, compat_shlex_quote
from .compat import (
HAS_LEGACY as compat_has_legacy,
compat_get_terminal_size,
compat_os_name,
compat_shlex_quote,
compat_str,
compat_urllib_error,
compat_urllib_request,
)
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
@ -636,7 +644,7 @@ class YoutubeDL:
try:
import pty
master, slave = pty.openpty()
width = shutil.get_terminal_size().columns
width = compat_get_terminal_size().columns
width_args = [] if width is None else ['-w', str(width)]
sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
try:
@ -791,7 +799,7 @@ class YoutubeDL:
return message
assert hasattr(self, '_output_process')
assert isinstance(message, str)
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode())
self._output_process.stdin.flush()
@ -827,7 +835,7 @@ class YoutubeDL:
def to_stderr(self, message, only_once=False):
"""Print message to stderr"""
assert isinstance(message, str)
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
@ -1562,7 +1570,7 @@ class YoutubeDL:
additional_urls = (ie_result or {}).get('additional_urls')
if additional_urls:
# TODO: Improve MetadataParserPP to allow setting a list
if isinstance(additional_urls, str):
if isinstance(additional_urls, compat_str):
additional_urls = [additional_urls]
self.to_screen(
'[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
@ -2355,10 +2363,10 @@ class YoutubeDL:
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, str):
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = str(field)
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
@ -2461,7 +2469,7 @@ class YoutubeDL:
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = str(i)
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
@ -2611,7 +2619,7 @@ class YoutubeDL:
if chapter or offset:
new_info.update({
'section_start': offset + chapter.get('start_time', 0),
'section_end': offset + min(chapter.get('end_time', duration), duration),
'section_end': offset + min(chapter.get('end_time', 0), duration),
'section_title': chapter.get('title'),
'section_number': chapter.get('index'),
})
@ -3716,7 +3724,7 @@ class YoutubeDL:
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = urllib.request.getproxies()
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
@ -3732,13 +3740,13 @@ class YoutubeDL:
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/ytdl-org/youtube-dl/issues/8227)
file_handler = urllib.request.FileHandler()
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
file_handler.file_open = file_open
opener = urllib.request.build_opener(
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in


@ -1,15 +1,15 @@
#!/usr/bin/env python3
f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541
__license__ = 'Public Domain'
import getpass
import itertools
import optparse
import os
import re
import sys
from .compat import compat_shlex_quote
from .compat import compat_getpass, compat_shlex_quote
from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
from .downloader import FileDownloader
from .downloader.external import get_external_downloader
@ -531,9 +531,9 @@ def validate_options(opts):
# Ask for passwords
if opts.username is not None and opts.password is None:
opts.password = getpass.getpass('Type account password and press [Return]: ')
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ap_username is not None and opts.ap_password is None:
opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')
opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
return warnings, deprecation_warnings


@ -1,5 +1,4 @@
#!/usr/bin/env python3
# Execute with
# $ python -m yt_dlp


@ -1,7 +1,6 @@
import base64
from math import ceil
from .compat import compat_ord
from .compat import compat_b64decode, compat_ord
from .dependencies import Cryptodome_AES
from .utils import bytes_to_intlist, intlist_to_bytes
@ -265,7 +264,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
"""
NONCE_LENGTH_BYTES = 8
data = bytes_to_intlist(base64.b64decode(data))
data = bytes_to_intlist(compat_b64decode(data))
password = bytes_to_intlist(password.encode())
key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))
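
For context, bytes_to_intlist/intlist_to_bytes are thin glue between bytes and the lists of ints that the pure-Python AES code operates on. A sketch of their assumed behavior (not the exact yt_dlp.utils source):

def bytes_to_intlist(bs):
    return list(bs)    # each byte becomes an int in 0..255

def intlist_to_bytes(xs):
    return bytes(xs)

assert intlist_to_bytes(bytes_to_intlist(b'key')) == b'key'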


@ -6,6 +6,7 @@ import re
import shutil
import traceback
from .compat import compat_getenv
from .utils import expand_path, write_json_file
@ -16,7 +17,7 @@ class Cache:
def _get_root_dir(self):
res = self._ydl.params.get('cachedir')
if res is None:
cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, 'yt-dlp')
return expand_path(res)
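
The lookup order above follows the XDG base-directory convention: an explicit cachedir param wins, then $XDG_CACHE_HOME, then ~/.cache. A self-contained sketch of the same fallback, with expand_path approximated by stdlib expansion:

import os

def cache_root(cachedir=None):
    # Fall back to the XDG cache home, then to ~/.cache
    root = cachedir if cachedir is not None else os.path.join(
        os.getenv('XDG_CACHE_HOME', '~/.cache'), 'yt-dlp')
    return os.path.expandvars(os.path.expanduser(root))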


@ -7,6 +7,7 @@ from . import re
from ._deprecated import * # noqa: F401, F403
from .compat_utils import passthrough_module
# XXX: Implement this the same way as other DeprecationWarnings without circular import
try:
passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(


@ -1,16 +1,52 @@
"""Deprecated - New code should avoid these"""
import base64
import urllib.error
import urllib.parse
compat_str = str
import getpass
import html
import html.parser
import http
import http.client
import http.cookiejar
import http.cookies
import http.server
import itertools
import os
import shutil
import struct
import tokenize
import urllib
compat_b64decode = base64.b64decode
compat_chr = chr
compat_cookiejar = http.cookiejar
compat_cookiejar_Cookie = http.cookiejar.Cookie
compat_cookies_SimpleCookie = http.cookies.SimpleCookie
compat_get_terminal_size = shutil.get_terminal_size
compat_getenv = os.getenv
compat_getpass = getpass.getpass
compat_html_entities = html.entities
compat_html_entities_html5 = html.entities.html5
compat_HTMLParser = html.parser.HTMLParser
compat_http_client = http.client
compat_http_server = http.server
compat_HTTPError = urllib.error.HTTPError
compat_urlparse = urllib.parse
compat_itertools_count = itertools.count
compat_parse_qs = urllib.parse.parse_qs
compat_str = str
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
compat_tokenize_tokenize = tokenize.tokenize
compat_urllib_error = urllib.error
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse
compat_urllib_request = urllib.request
compat_urlparse = compat_urllib_parse = urllib.parse
def compat_setenv(key, value, env=os.environ):
env[key] = value
__all__ = [x for x in globals() if x.startswith('compat_')]
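
A hedged usage sketch of the resulting surface, assuming the passthrough from compat/__init__.py above is active (whether a DeprecationWarning also fires depends on how its callback is wired):

import base64
from yt_dlp.compat import compat_b64decode  # resolved from compat._legacy on demand

assert compat_b64decode is base64.b64decode
assert compat_b64decode(b'aGk=') == b'hi'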


@ -2,27 +2,18 @@
import collections
import ctypes
import getpass
import html.entities
import html.parser
import http
import http.client
import http.cookiejar
import http.cookies
import http.server
import itertools
import os
import shlex
import shutil
import socket
import struct
import tokenize
import urllib.error
import urllib.parse
import urllib.request
import urllib
import xml.etree.ElementTree as etree
from subprocess import DEVNULL
from .compat_utils import passthrough_module # isort: split
from .asyncio import run as compat_asyncio_run # noqa: F401
from .re import Pattern as compat_Pattern # noqa: F401
from .re import match as compat_Match # noqa: F401
@ -30,8 +21,6 @@ from ..dependencies import Cryptodome_AES as compat_pycrypto_AES # noqa: F401
from ..dependencies import brotli as compat_brotli # noqa: F401
from ..dependencies import websockets as compat_websockets # noqa: F401
passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))
# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
@ -39,17 +28,12 @@ def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
return ctypes.WINFUNCTYPE(*args, **kwargs)
def compat_setenv(key, value, env=os.environ):
env[key] = value
compat_basestring = str
compat_collections_abc = collections.abc
compat_cookies = http.cookies
compat_etree_Element = etree.Element
compat_etree_register_namespace = etree.register_namespace
compat_filter = filter
compat_getenv = os.getenv
compat_input = input
compat_integer_types = (int, )
compat_kwargs = lambda kwargs: kwargs
@ -65,28 +49,16 @@ compat_urllib_parse_quote_plus = urllib.parse.quote_plus
compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
compat_urllib_parse_urlunparse = urllib.parse.urlunparse
compat_urllib_request_DataHandler = urllib.request.DataHandler
compat_urllib_request = urllib.request
compat_urllib_response = urllib.response
compat_urlretrieve = urllib.request.urlretrieve
compat_xml_parse_error = etree.ParseError
compat_xpath = lambda xpath: xpath
compat_zip = zip
workaround_optparse_bug9161 = lambda: None
compat_getpass = getpass.getpass
compat_chr = chr
compat_urllib_parse = urllib.parse
compat_itertools_count = itertools.count
compat_cookiejar = http.cookiejar
compat_cookiejar_Cookie = http.cookiejar.Cookie
compat_cookies_SimpleCookie = http.cookies.SimpleCookie
compat_get_terminal_size = shutil.get_terminal_size
compat_html_entities = html.entities
compat_html_entities_html5 = html.entities.html5
compat_tokenize_tokenize = tokenize.tokenize
compat_HTMLParser = html.parser.HTMLParser
compat_http_client = http.client
compat_http_server = http.server
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
compat_urllib_error = urllib.error
compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
def __getattr__(name):
if name in ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'):
from .. import utils
return getattr(utils, name)
raise AttributeError(name)


@ -4,6 +4,7 @@ import importlib
import sys
import types
_NO_ATTRIBUTE = object()
_Package = collections.namedtuple('Package', ('name', 'version'))
@ -30,7 +31,7 @@ def _is_package(module):
return True
def passthrough_module(parent, child, allowed_attributes=None, *, callback=lambda _: None):
def passthrough_module(parent, child, *, callback=lambda _: None):
parent_module = importlib.import_module(parent)
child_module = None # Import child module only as needed
@ -40,30 +41,22 @@ def passthrough_module(parent, child, *, callback=lambda _: None):
with contextlib.suppress(ImportError):
return importlib.import_module(f'.{attr}', parent)
ret = self.__from_child(attr)
if ret is _NO_ATTRIBUTE:
raise AttributeError(f'module {parent} has no attribute {attr}')
callback(attr)
return ret
def __from_child(self, attr):
if allowed_attributes is None:
if attr.startswith('__') and attr.endswith('__'):
return _NO_ATTRIBUTE
elif attr not in allowed_attributes:
return _NO_ATTRIBUTE
nonlocal child_module
child_module = child_module or importlib.import_module(child, parent)
ret = _NO_ATTRIBUTE
with contextlib.suppress(AttributeError):
return getattr(child_module, attr)
ret = getattr(child_module, attr)
if _is_package(child_module):
with contextlib.suppress(ImportError):
return importlib.import_module(f'.{attr}', child)
ret = importlib.import_module(f'.{attr}', child)
return _NO_ATTRIBUTE
if ret is _NO_ATTRIBUTE:
raise AttributeError(f'module {parent} has no attribute {attr}')
callback(attr)
return ret
# Python 3.6 does not have module level __getattr__
# https://peps.python.org/pep-0562/
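
For readers unfamiliar with PEP 562: from Python 3.7 a module may define __getattr__, which is called only when normal attribute lookup fails, and that is the hook passthrough_module builds on. A minimal self-contained sketch with illustrative names:

# mymodule.py
import warnings

new_api = object()
_aliases = {'old_api': 'new_api'}

def __getattr__(attr):
    # Reached only when `attr` is not already defined in this module
    if attr in _aliases:
        warnings.warn(f'{attr} is deprecated, use {_aliases[attr]}',
                      DeprecationWarning, stacklevel=2)
        return globals()[_aliases[attr]]
    raise AttributeError(f'module {__name__!r} has no attribute {attr!r}')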


@ -1,7 +1,5 @@
import base64
import contextlib
import ctypes
import http.cookiejar
import json
import os
import shutil
@ -19,6 +17,7 @@ from .aes import (
aes_gcm_decrypt_and_verify_bytes,
unpad_pkcs7,
)
from .compat import compat_b64decode, compat_cookiejar_Cookie
from .dependencies import (
_SECRETSTORAGE_UNAVAILABLE_REASON,
secretstorage,
@ -143,7 +142,7 @@ def _extract_firefox_cookies(profile, logger):
total_cookie_count = len(table)
for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
cookie = http.cookiejar.Cookie(
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
@ -298,7 +297,7 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
if value is None:
return is_encrypted, None
return is_encrypted, http.cookiejar.Cookie(
return is_encrypted, compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
@ -590,7 +589,7 @@ def _parse_safari_cookies_record(data, jar, logger):
p.skip_to(record_size, 'space at the end of the record')
cookie = http.cookiejar.Cookie(
cookie = compat_cookiejar_Cookie(
version=0, name=name, value=value, port=None, port_specified=False,
domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
@ -836,7 +835,7 @@ def _get_windows_v10_key(browser_root, logger):
except KeyError:
logger.error('no encrypted key in Local State')
return None
encrypted_key = base64.b64decode(base64_key)
encrypted_key = compat_b64decode(base64_key)
prefix = b'DPAPI'
if not encrypted_key.startswith(prefix):
logger.error('invalid key')


@ -6,7 +6,8 @@ import sys
import time
from .fragment import FragmentFD
from ..compat import functools
from ..compat import functools # isort: split
from ..compat import compat_setenv
from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
from ..utils import (
Popen,
@ -402,8 +403,8 @@ class FFmpegFD(ExternalFD):
# We could switch to the following code if we are able to detect version properly
# args += ['-http_proxy', proxy]
env = os.environ.copy()
env['HTTP_PROXY'] = proxy
env['http_proxy'] = proxy
compat_setenv('HTTP_PROXY', proxy, env=env)
compat_setenv('http_proxy', proxy, env=env)
protocol = info_dict.get('protocol')


@ -1,13 +1,17 @@
import base64
import io
import itertools
import struct
import time
import urllib.error
import urllib.parse
from .fragment import FragmentFD
from ..compat import compat_etree_fromstring
from ..compat import (
compat_b64decode,
compat_etree_fromstring,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import fix_xml_ampersands, xpath_text
@ -31,13 +35,13 @@ class FlvReader(io.BytesIO):
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return struct.unpack('!Q', self.read_bytes(8))[0]
return compat_struct_unpack('!Q', self.read_bytes(8))[0]
def read_unsigned_int(self):
return struct.unpack('!I', self.read_bytes(4))[0]
return compat_struct_unpack('!I', self.read_bytes(4))[0]
def read_unsigned_char(self):
return struct.unpack('!B', self.read_bytes(1))[0]
return compat_struct_unpack('!B', self.read_bytes(1))[0]
def read_string(self):
res = b''
@ -199,11 +203,11 @@ def build_fragments_list(boot_info):
def write_unsigned_int(stream, val):
stream.write(struct.pack('!I', val))
stream.write(compat_struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(struct.pack('!I', val)[1:])
stream.write(compat_struct_pack('!I', val)[1:])
def write_flv_header(stream):
@ -297,12 +301,12 @@ class F4mFD(FragmentFD):
# 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
bootstrap_url = node.get('url')
if bootstrap_url:
bootstrap_url = urllib.parse.urljoin(
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_url)
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text)
bootstrap = compat_b64decode(node.text)
boot_info = read_bootstrap_info(bootstrap)
return boot_info, bootstrap_url
@ -332,14 +336,14 @@ class F4mFD(FragmentFD):
# Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
man_base_url = get_base_url(doc) or man_url
base_url = urllib.parse.urljoin(man_base_url, media.attrib['url'])
base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
boot_info, bootstrap_url = self._parse_bootstrap_node(
bootstrap_node, man_base_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text)
metadata = compat_b64decode(metadata_node.text)
else:
metadata = None
@ -367,7 +371,7 @@ class F4mFD(FragmentFD):
if not live:
write_metadata_tag(dest_stream, metadata)
base_url_parsed = urllib.parse.urlparse(base_url)
base_url_parsed = compat_urllib_parse_urlparse(base_url)
self._start_frag_download(ctx, info_dict)
@ -407,7 +411,7 @@ class F4mFD(FragmentFD):
if box_type == b'mdat':
self._append_fragment(ctx, box_data)
break
except urllib.error.HTTPError as err:
except compat_urllib_error.HTTPError as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
# with the next available fragment.


@ -4,14 +4,12 @@ import http.client
import json
import math
import os
import struct
import time
import urllib.error
from .common import FileDownloader
from .http import HttpFD
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_os_name
from ..compat import compat_os_name, compat_struct_pack, compat_urllib_error
from ..utils import (
DownloadError,
encodeFilename,
@ -350,7 +348,7 @@ class FragmentFD(FileDownloader):
decrypt_info = fragment.get('decrypt_info')
if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
return frag_content
iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
# Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
# size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
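
The '>8xq' fallback implements the HLS default-IV rule (RFC 8216 §5.2): when EXT-X-KEY carries no IV attribute, the IV is the 128-bit big-endian representation of the media sequence number. A quick check:

import struct

iv = struct.pack('>8xq', 7)           # 8 zero pad bytes + 64-bit big-endian int
assert iv == (7).to_bytes(16, 'big')  # equals the 128-bit sequence number
assert len(iv) == 16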
@ -459,7 +457,7 @@ class FragmentFD(FileDownloader):
if self._download_fragment(ctx, fragment['url'], info_dict, headers):
break
return
except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
# Unavailable (possibly temporary) fragments may be served.
# First we try to retry then either skip or abort.
# See https://github.com/ytdl-org/youtube-dl/issues/10165,


@ -1,12 +1,12 @@
import binascii
import io
import re
import urllib.parse
from . import get_suitable_downloader
from .external import FFmpegFD
from .fragment import FragmentFD
from .. import webvtt
from ..compat import compat_urlparse
from ..dependencies import Cryptodome_AES
from ..utils import bug_reports_message, parse_m3u8_attributes, update_url_query
@ -140,7 +140,7 @@ class HlsFD(FragmentFD):
extra_query = None
extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
if extra_param_to_segment_url:
extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
i = 0
media_sequence = 0
decrypt_info = {'METHOD': 'NONE'}
@ -162,7 +162,7 @@ class HlsFD(FragmentFD):
frag_url = (
line
if re.match(r'^https?://', line)
else urllib.parse.urljoin(man_url, line))
else compat_urlparse.urljoin(man_url, line))
if extra_query:
frag_url = update_url_query(frag_url, extra_query)
@ -187,7 +187,7 @@ class HlsFD(FragmentFD):
frag_url = (
map_info.get('URI')
if re.match(r'^https?://', map_info.get('URI'))
else urllib.parse.urljoin(man_url, map_info.get('URI')))
else compat_urlparse.urljoin(man_url, map_info.get('URI')))
if extra_query:
frag_url = update_url_query(frag_url, extra_query)
@ -215,7 +215,7 @@ class HlsFD(FragmentFD):
if 'IV' in decrypt_info:
decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
if not re.match(r'^https?://', decrypt_info['URI']):
decrypt_info['URI'] = urllib.parse.urljoin(
decrypt_info['URI'] = compat_urlparse.urljoin(
man_url, decrypt_info['URI'])
if extra_query:
decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)


@ -1,12 +1,11 @@
import http.client
import os
import random
import socket
import ssl
import time
import urllib.error
from .common import FileDownloader
from ..compat import compat_http_client, compat_urllib_error
from ..utils import (
ContentTooShortError,
ThrottledDownload,
@ -25,7 +24,7 @@ RESPONSE_READ_EXCEPTIONS = (
socket.timeout, # compat: py < 3.10
ConnectionError,
ssl.SSLError,
http.client.HTTPException
compat_http_client.HTTPException
)
@ -156,7 +155,7 @@ class HttpFD(FileDownloader):
ctx.resume_len = 0
ctx.open_mode = 'wb'
ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
except urllib.error.HTTPError as err:
except compat_urllib_error.HTTPError as err:
if err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
@ -164,7 +163,7 @@ class HttpFD(FileDownloader):
ctx.data = self.ydl.urlopen(
sanitized_Request(url, request_data, headers))
content_length = ctx.data.info()['Content-Length']
except urllib.error.HTTPError as err:
except compat_urllib_error.HTTPError as err:
if err.code < 500 or err.code >= 600:
raise
else:
@ -197,7 +196,7 @@ class HttpFD(FileDownloader):
# Unexpected HTTP error
raise
raise RetryDownload(err)
except urllib.error.URLError as err:
except compat_urllib_error.URLError as err:
if isinstance(err.reason, ssl.CertificateError):
raise
raise RetryDownload(err)


@ -2,9 +2,9 @@ import binascii
import io
import struct
import time
import urllib.error
from .fragment import FragmentFD
from ..compat import compat_urllib_error
u8 = struct.Struct('>B')
u88 = struct.Struct('>Bx')
@ -268,7 +268,7 @@ class IsmFD(FragmentFD):
extra_state['ism_track_written'] = True
self._append_fragment(ctx, frag_content)
break
except urllib.error.HTTPError as err:
except compat_urllib_error.HTTPError as err:
count += 1
if count <= fragment_retries:
self.report_retry_fragment(err, frag_index, count, fragment_retries)


@ -4,6 +4,7 @@ import subprocess
import time
from .common import FileDownloader
from ..compat import compat_str
from ..utils import (
Popen,
check_executable,
@ -142,7 +143,7 @@ class RtmpFD(FileDownloader):
if isinstance(conn, list):
for entry in conn:
basic_args += ['--conn', entry]
elif isinstance(conn, str):
elif isinstance(conn, compat_str):
basic_args += ['--conn', conn]
if protocol is not None:
basic_args += ['--protocol', protocol]


@ -1,8 +1,8 @@
import json
import time
import urllib.error
from .fragment import FragmentFD
from ..compat import compat_urllib_error
from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get
@ -128,7 +128,7 @@ class YoutubeLiveChatFD(FragmentFD):
elif info_dict['protocol'] == 'youtube_live_chat':
continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
return True, continuation_id, offset, click_tracking_params
except urllib.error.HTTPError as err:
except compat_urllib_error.HTTPError as err:
count += 1
if count <= fragment_retries:
self.report_retry_fragment(err, frag_index, count, fragment_retries)


@ -7,13 +7,12 @@ import json
import re
import struct
import time
import urllib.parse
import urllib.request
import urllib.response
import uuid
from .common import InfoExtractor
from ..aes import aes_ecb_decrypt
from ..compat import compat_urllib_parse_urlparse, compat_urllib_request
from ..utils import (
ExtractorError,
bytes_to_intlist,
@ -34,7 +33,7 @@ def add_opener(ydl, handler):
''' Add a handler for opening URLs, like _download_webpage '''
# https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
# https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
assert isinstance(ydl._opener, urllib.request.OpenerDirector)
assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
ydl._opener.add_handler(handler)
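
add_opener/remove_opener lean on urllib's dispatch convention: an OpenerDirector routes each request to a handler method named <scheme>_open, which is how AbemaLicenseHandler below serves its custom license URLs. A self-contained sketch of the mechanism with an invented demo:// scheme (all names illustrative, not yt-dlp API):

import io
import urllib.request
import urllib.response

class DemoHandler(urllib.request.BaseHandler):
    def demo_open(self, req):  # dispatched for demo:// URLs by naming convention
        body = b'hello'
        return urllib.response.addinfourl(io.BytesIO(body), headers={}, url=req.full_url)

opener = urllib.request.build_opener(DemoHandler())
assert opener.open('demo://x').read() == b'hello'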
@ -47,7 +46,7 @@ def remove_opener(ydl, handler):
# https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
# https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
opener = ydl._opener
assert isinstance(ydl._opener, urllib.request.OpenerDirector)
assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
if isinstance(handler, (type, tuple)):
find_cp = lambda x: isinstance(x, handler)
else:
@ -97,7 +96,7 @@ def remove_opener(ydl, handler):
opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
class AbemaLicenseHandler(urllib.request.BaseHandler):
class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
handler_order = 499
STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
@ -137,7 +136,7 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
def abematv_license_open(self, url):
url = request_to_url(url)
ticket = urllib.parse.urlparse(url).netloc
ticket = compat_urllib_parse_urlparse(url).netloc
response_data = self._get_videokey_from_ticket(ticket)
return urllib.response.addinfourl(io.BytesIO(response_data), headers={
'Content-Length': len(response_data),

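Background for the assertions above: an OpenerDirector dispatches each URL to whichever registered handler defines a matching '<scheme>_open' method returning a file-like response, which is how AbemaLicenseHandler plugs in. A minimal, self-contained sketch with an invented 'demo' scheme (scheme and payload are illustrative only):

import io
import urllib.request
import urllib.response

class DemoHandler(urllib.request.BaseHandler):
    handler_order = 499  # run before the default handlers

    def demo_open(self, req):
        # serve a canned payload for any demo:// URL
        payload = b'hello from the demo scheme'
        return urllib.response.addinfourl(
            io.BytesIO(payload),
            headers={'Content-Length': str(len(payload))},
            url=req.get_full_url(), code=200)

opener = urllib.request.build_opener(DemoHandler())
print(opener.open('demo://anything').read())  # b'hello from the demo scheme'
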
View File

@ -1,4 +1,3 @@
import getpass
import json
import re
import time
@ -6,14 +5,18 @@ import urllib.error
import xml.etree.ElementTree as etree
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
NO_DEFAULT,
ExtractorError,
unescapeHTML,
unified_timestamp,
urlencode_postdata,
from ..compat import (
compat_urlparse,
compat_getpass
)
from ..utils import (
unescapeHTML,
urlencode_postdata,
unified_timestamp,
ExtractorError,
NO_DEFAULT,
)
MSO_INFO = {
'DTV': {
@ -1503,7 +1506,7 @@ class AdobePassIE(InfoExtractor):
'send_confirm_link': False,
'send_token': True
}))
philo_code = getpass.getpass('Type auth code you have received [Return]: ')
philo_code = compat_getpass('Type auth code you have received [Return]: ')
self._download_webpage(
'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
'token': philo_code

View File

@ -1,34 +1,36 @@
import json
import re
import urllib.parse
import json
from .common import InfoExtractor
from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
from ..compat import compat_HTTPError, compat_urllib_parse_unquote
from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_HTTPError
)
from ..utils import (
KNOWN_EXTENSIONS,
ExtractorError,
HEADRequest,
bug_reports_message,
clean_html,
dict_get,
extract_attributes,
ExtractorError,
get_element_by_id,
HEADRequest,
int_or_none,
join_nonempty,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
parse_qs,
str_or_none,
str_to_int,
str_or_none,
traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
url_or_none,
urlhandle_detect_ext,
url_or_none
)
@ -141,7 +143,7 @@ class ArchiveOrgIE(InfoExtractor):
return json.loads(extract_attributes(element)['value'])
def _real_extract(self, url):
video_id = urllib.parse.unquote_plus(self._match_id(url))
video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
# Archive.org metadata API doesn't clearly demarcate playlist entries

View File

@ -1,8 +1,8 @@
import random
from .common import InfoExtractor
from ..compat import compat_str, compat_urllib_parse_unquote
from ..utils import ExtractorError, str_or_none, try_get
from ..utils import ExtractorError, try_get, compat_str, str_or_none
from ..compat import compat_urllib_parse_unquote
class AudiusBaseIE(InfoExtractor):

View File

@ -1,12 +1,16 @@
import xml.etree.ElementTree
import functools
import itertools
import json
import re
import urllib.error
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import compat_HTTPError, compat_str, compat_urlparse
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_error,
compat_urlparse,
)
from ..utils import (
ExtractorError,
OnDemandPagedList,
@ -387,7 +391,7 @@ class BBCCoUkIE(InfoExtractor):
href, programme_id, ext='mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False)
except ExtractorError as e:
if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
and e.exc_info[1].code in (403, 404)):
raise
fmts = []

View File

@ -1,9 +1,13 @@
import codecs
import json
import re
import json
from .common import InfoExtractor
from ..compat import compat_ord, compat_urllib_parse_unquote
from ..compat import (
compat_chr,
compat_ord,
compat_urllib_parse_unquote,
)
from ..utils import (
ExtractorError,
float_or_none,
@ -12,8 +16,8 @@ from ..utils import (
multipart_encode,
parse_duration,
random_birthday,
try_get,
urljoin,
try_get,
)
@ -140,7 +144,7 @@ class CDAIE(InfoExtractor):
b = []
for c in a:
f = compat_ord(c)
b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f))
b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f <= 126 else compat_chr(f))
a = ''.join(b)
a = a.replace('.cda.mp4', '')
for p in ('.2cda.pl', '.3cda.pl'):

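The substitution in the CDA hunk above, 33 + (f + 14) % 94, is a fixed rotation over the 94 printable ASCII characters; it reduces to ROT47, so applying it twice is the identity. A hedged standalone version of the same mapping:

def rotate_printable(s):
    # rotate printable ASCII (33..126); leave everything else untouched
    return ''.join(
        chr(33 + (ord(ch) + 14) % 94) if 33 <= ord(ch) <= 126 else ch
        for ch in s)

s = '.2cda.pl'
assert rotate_printable(rotate_printable(s)) == s  # self-inverse, like ROT47
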
View File

@ -1,11 +1,11 @@
import itertools
import json
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
from ..utils import (
ExtractorError,
clean_html,
ExtractorError,
int_or_none,
str_to_int,
url_or_none,
@ -47,8 +47,8 @@ class ChingariBaseIE(InfoExtractor):
'id': id,
'extractor_key': ChingariIE.ie_key(),
'extractor': 'Chingari',
'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
'title': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
'description': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
'duration': media_data.get('duration'),
'thumbnail': url_or_none(thumbnail),
'like_count': post_data.get('likeCount'),

View File

@ -1,10 +1,6 @@
import base64
import collections
import getpass
import hashlib
import http.client
import http.cookiejar
import http.cookies
import itertools
import json
import math
@ -13,12 +9,24 @@ import os
import random
import sys
import time
import urllib.parse
import urllib.request
import xml.etree.ElementTree
from ..compat import functools, re # isort: split
from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
from ..compat import (
compat_cookiejar_Cookie,
compat_cookies_SimpleCookie,
compat_etree_fromstring,
compat_expanduser,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader import FileDownloader
from ..downloader.f4m import get_base_url, remove_encrypted_media
from ..utils import (
@ -663,7 +671,7 @@ class InfoExtractor:
if hasattr(e, 'countries'):
kwargs['countries'] = e.countries
raise type(e)(e.orig_msg, **kwargs)
except http.client.IncompleteRead as e:
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
@ -722,7 +730,7 @@ class InfoExtractor:
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, urllib.error.HTTPError)
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
elif callable(expected_status):
@ -731,7 +739,7 @@ class InfoExtractor:
return err.code in variadic(expected_status)
def _create_request(self, url_or_request, data=None, headers={}, query={}):
if isinstance(url_or_request, urllib.request.Request):
if isinstance(url_or_request, compat_urllib_request.Request):
return update_Request(url_or_request, data=data, headers=headers, query=query)
if query:
url_or_request = update_url_query(url_or_request, query)
@ -771,7 +779,7 @@ class InfoExtractor:
try:
return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
except network_exceptions as err:
if isinstance(err, urllib.error.HTTPError):
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
@ -799,7 +807,7 @@ class InfoExtractor:
Arguments:
url_or_request -- plain text URL as a string or
a urllib.request.Request object
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
@ -827,7 +835,7 @@ class InfoExtractor:
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, str):
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
@ -1048,7 +1056,7 @@ class InfoExtractor:
while True:
try:
return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
except http.client.IncompleteRead as e:
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
@ -1284,7 +1292,7 @@ class InfoExtractor:
if tfa is not None:
return tfa
return getpass.getpass('Type %s and press [Return]: ' % note)
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
@ -1420,7 +1428,7 @@ class InfoExtractor:
return {}
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, str):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
@ -1510,7 +1518,7 @@ class InfoExtractor:
# both types can have 'name' property(inherited from 'Thing' type). [1]
# however some websites are using 'Text' type instead.
# 1. https://schema.org/VideoObject
'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
'filesize': int_or_none(float_or_none(e.get('contentSize'))),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
@ -2159,7 +2167,7 @@ class InfoExtractor:
]), m3u8_doc)
def format_url(url):
return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)
return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)
if self.get_param('hls_split_discontinuity', False):
def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
@ -2532,7 +2540,7 @@ class InfoExtractor:
})
continue
src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
@ -2555,7 +2563,7 @@ class InfoExtractor:
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += urllib.parse.urlencode(f4m_params)
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
@ -2825,7 +2833,7 @@ class InfoExtractor:
if re.match(r'^https?://', base_url):
break
if mpd_base_url and base_url.startswith('/'):
base_url = urllib.parse.urljoin(mpd_base_url, base_url)
base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
elif mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/'):
mpd_base_url += '/'
@ -3095,7 +3103,7 @@ class InfoExtractor:
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
@ -3114,7 +3122,7 @@ class InfoExtractor:
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
@ -3358,7 +3366,7 @@ class InfoExtractor:
return formats, subtitles
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = urllib.parse.urlparse(url).query
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
@ -3464,7 +3472,7 @@ class InfoExtractor:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, str):
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
@ -3537,7 +3545,7 @@ class InfoExtractor:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
@ -3589,15 +3597,15 @@ class InfoExtractor:
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = http.cookiejar.Cookie(
cookie = compat_cookiejar_Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a http.cookies.SimpleCookie with the cookies for the url """
return http.cookies.SimpleCookie(self._downloader._calc_cookies(url))
""" Return a compat_cookies_SimpleCookie with the cookies for the url """
return compat_cookies_SimpleCookie(self._downloader._calc_cookies(url))
def _apply_first_set_cookie_header(self, url_handle, cookie):
"""
@ -3763,10 +3771,10 @@ class InfoExtractor:
return headers
def _generic_id(self, url):
return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
@staticmethod
def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):

View File
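The __can_accept_status_code hunk above accepts expected_status as None, a callable, or one or more status codes. A hedged sketch of that dispatch, with the membership test standing in for the variadic() helper:

def can_accept_status_code(code, expected_status):
    if expected_status is None:
        return False
    if callable(expected_status):
        return expected_status(code)
    if isinstance(expected_status, int):
        return code == expected_status
    return code in expected_status  # stands in for variadic(expected_status)

print(can_accept_status_code(404, 404))                 # True
print(can_accept_status_code(403, (403, 404)))          # True
print(can_accept_status_code(500, lambda c: c >= 400))  # True
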

@ -1,6 +1,5 @@
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_urlparse
class RtmpIE(InfoExtractor):
@ -24,7 +23,7 @@ class RtmpIE(InfoExtractor):
'formats': [{
'url': url,
'ext': 'flv',
'format_id': urllib.parse.urlparse(url).scheme,
'format_id': compat_urlparse.urlparse(url).scheme,
}],
}

View File

@ -1,20 +1,19 @@
import base64
import json
import re
import urllib.request
import xml.etree.ElementTree
import json
import zlib
from hashlib import sha1
from math import floor, pow, sqrt
import xml.etree.ElementTree
from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
from .vrv import VRVBaseIE
from ..aes import aes_cbc_decrypt
from ..compat import (
compat_b64decode,
compat_etree_fromstring,
compat_str,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
@ -23,8 +22,8 @@ from ..utils import (
extract_attributes,
float_or_none,
format_field,
int_or_none,
intlist_to_bytes,
int_or_none,
join_nonempty,
lowercase_escape,
merge_dicts,
@ -35,6 +34,9 @@ from ..utils import (
try_get,
xpath_text,
)
from ..aes import (
aes_cbc_decrypt,
)
class CrunchyrollBaseIE(InfoExtractor):
@ -257,7 +259,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
}
def _download_webpage(self, url_or_request, *args, **kwargs):
request = (url_or_request if isinstance(url_or_request, urllib.request.Request)
request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
else sanitized_Request(url_or_request))
# Accept-Language must be set explicitly to accept any language to avoid issues
# similar to https://github.com/ytdl-org/youtube-dl/issues/6797.

View File

@ -1,8 +1,12 @@
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ExtractorError, int_or_none, urlencode_postdata
from ..utils import (
int_or_none,
urlencode_postdata,
compat_str,
ExtractorError,
)
class CuriosityStreamBaseIE(InfoExtractor):
@ -46,7 +50,7 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
IE_NAME = 'curiositystream'
_VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
_TESTS = [{
'url': 'http://app.curiositystream.com/video/2',
'url': 'https://app.curiositystream.com/video/2',
'info_dict': {
'id': '2',
'ext': 'mp4',

View File

@ -1,10 +1,10 @@
import base64
import json
import re
import urllib.parse
import urllib
from .adobepass import AdobePassIE
from .common import InfoExtractor
from .adobepass import AdobePassIE
from .once import OnceIE
from ..utils import (
determine_ext,
@ -197,7 +197,7 @@ class ESPNArticleIE(InfoExtractor):
@classmethod
def suitable(cls, url):
return False if (ESPNIE.suitable(url) or WatchESPNIE.suitable(url)) else super().suitable(url)
return False if (ESPNIE.suitable(url) or WatchESPNIE.suitable(url)) else super(ESPNArticleIE, cls).suitable(url)
def _real_extract(self, url):
video_id = self._match_id(url)

View File

@ -1,18 +1,18 @@
import json
import re
import urllib.parse
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
ExtractorError,
clean_html,
determine_ext,
error_to_compat_str,
ExtractorError,
float_or_none,
get_element_by_id,
get_first,
@ -467,7 +467,7 @@ class FacebookIE(InfoExtractor):
dash_manifest = video.get('dash_manifest')
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
def process_formats(formats):
# Downloads with browser's User-Agent are rate limited. Working around

View File

@ -1,6 +1,5 @@
import os
import re
import urllib.parse
import xml.etree.ElementTree
from .ant1newsgr import Ant1NewsGrEmbedIE
@ -107,7 +106,12 @@ from .yapfiles import YapFilesIE
from .youporn import YouPornIE
from .youtube import YoutubeIE
from .zype import ZypeIE
from ..compat import compat_etree_fromstring
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
KNOWN_EXTENSIONS,
ExtractorError,
@ -2699,7 +2703,7 @@ class GenericIE(InfoExtractor):
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = urllib.parse.urljoin(url, camtasia_cfg)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
@ -2715,7 +2719,7 @@ class GenericIE(InfoExtractor):
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': f'{title} - {n.tag}',
'url': urllib.parse.urljoin(url, url_n.text),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
@ -2767,7 +2771,7 @@ class GenericIE(InfoExtractor):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = urllib.parse.urlparse(url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self.get_param('default_search')
if default_search is None:
@ -2843,7 +2847,7 @@ class GenericIE(InfoExtractor):
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
self.report_detected('direct video link')
format_id = str(m.group('format_id'))
format_id = compat_str(m.group('format_id'))
subtitles = {}
if format_id.endswith('mpegurl'):
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
@ -2962,7 +2966,7 @@ class GenericIE(InfoExtractor):
# Unescaping the whole page allows to handle those cases in a generic way
# FIXME: unescaping the whole page may break URLs, commenting out for now.
# There probably should be a second run of generic extractor on unescaped webpage.
# webpage = urllib.parse.unquote(webpage)
# webpage = compat_urllib_parse_unquote(webpage)
# Unescape squarespace embeds to be detected by generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
@ -3235,7 +3239,7 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(urllib.parse.unquote(mobj.group('url')))
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
@ -3488,7 +3492,7 @@ class GenericIE(InfoExtractor):
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
urllib.parse.urljoin(url, mobj.group('url')), 'UDNEmbed')
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
@ -3721,7 +3725,7 @@ class GenericIE(InfoExtractor):
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
urllib.parse.urljoin(url, mediasite_url),
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
@ -3916,11 +3920,11 @@ class GenericIE(InfoExtractor):
subtitles = {}
for source in sources:
src = source.get('src')
if not src or not isinstance(src, str):
if not src or not isinstance(src, compat_str):
continue
src = urllib.parse.urljoin(url, src)
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, str):
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
@ -3954,7 +3958,7 @@ class GenericIE(InfoExtractor):
if not src:
continue
subtitles.setdefault(dict_get(sub, ('language', 'srclang')) or 'und', []).append({
'url': urllib.parse.urljoin(url, src),
'url': compat_urlparse.urljoin(url, src),
'name': sub.get('label'),
'http_headers': {
'Referer': full_response.geturl(),
@ -3981,7 +3985,7 @@ class GenericIE(InfoExtractor):
return True
if RtmpIE.suitable(vurl):
return True
vpath = urllib.parse.urlparse(vurl).path
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath, None)
return vext not in (None, 'swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
@ -4109,7 +4113,7 @@ class GenericIE(InfoExtractor):
if refresh_header:
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = urllib.parse.urljoin(url, unescapeHTML(found.group(1)))
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
@ -4135,8 +4139,8 @@ class GenericIE(InfoExtractor):
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = urllib.parse.urljoin(url, video_url)
video_id = urllib.parse.unquote(os.path.basename(video_url))
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):

View File

@ -1,8 +1,13 @@
import itertools
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import parse_duration, parse_iso8601, qualities, str_to_int
from ..utils import (
qualities,
compat_str,
parse_duration,
parse_iso8601,
str_to_int,
)
class GigaIE(InfoExtractor):

View File

@ -1,13 +1,13 @@
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
determine_ext,
parse_iso8601,
float_or_none,
int_or_none,
parse_iso8601,
compat_str,
determine_ext,
)

View File

@ -1,16 +1,16 @@
import itertools
import re
import urllib.parse
import urllib
from .common import InfoExtractor
from ..utils import (
int_or_none,
mimetype2ext,
remove_end,
strip_or_none,
unified_strdate,
url_or_none,
urljoin,
unified_strdate,
strip_or_none,
)

View File

@ -1,10 +1,10 @@
import random
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
from ..utils import (
float_or_none,
int_or_none,
float_or_none,
timeconvert,
update_url_query,
xpath_text,
@ -66,7 +66,7 @@ class KUSIIE(InfoExtractor):
formats = []
for quality in quality_options:
formats.append({
'url': urllib.parse.unquote_plus(quality.attrib['url']),
'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
'height': int_or_none(quality.attrib.get('height')),
'width': int_or_none(quality.attrib.get('width')),
'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),

View File

@ -1,7 +1,7 @@
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
clean_html,
compat_str,
format_field,
int_or_none,
parse_iso8601,

View File

@ -1,14 +1,17 @@
import json
import re
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_parse_qs, compat_urllib_parse_unquote
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
)
from ..utils import (
ExtractorError,
determine_ext,
get_element_by_attribute,
ExtractorError,
int_or_none,
get_element_by_attribute,
mimetype2ext,
)
@ -140,7 +143,7 @@ class MetacafeIE(InfoExtractor):
headers = {
# Disable family filter
'Cookie': 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False}))
}
# AnyClip videos require the flashversion cookie so that we get the link

View File

@ -3,6 +3,7 @@ import itertools
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_chr,
compat_ord,
compat_str,
compat_urllib_parse_unquote,
@ -71,7 +72,7 @@ class MixcloudIE(MixcloudBaseIE):
def _decrypt_xor_cipher(key, ciphertext):
"""Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
return ''.join([
chr(compat_ord(ch) ^ compat_ord(k))
compat_chr(compat_ord(ch) ^ compat_ord(k))
for ch, k in zip(ciphertext, itertools.cycle(key))])
def _real_extract(self, url):

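The docstring in _decrypt_xor_cipher above can be demonstrated directly: XOR with a repeating key is its own inverse, so the same function both scrambles and unscrambles. A round trip with made-up inputs:

import itertools

def xor_cipher(key, text):
    return ''.join(
        chr(ord(ch) ^ ord(k))
        for ch, k in zip(text, itertools.cycle(key)))

scrambled = xor_cipher('examplekey', '/stream/url.mp3')
assert xor_cipher('examplekey', scrambled) == '/stream/url.mp3'
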
View File

@ -1,7 +1,13 @@
import urllib.parse
from .common import InfoExtractor
from ..utils import parse_duration, remove_end, unified_strdate, urljoin
from ..compat import (
compat_urllib_parse_unquote_plus
)
from ..utils import (
parse_duration,
remove_end,
unified_strdate,
urljoin
)
class NDTVIE(InfoExtractor):
@ -74,7 +80,7 @@ class NDTVIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)
# '__title' does not contain extra words such as sub-site name, "Video" etc.
title = urllib.parse.unquote_plus(
title = compat_urllib_parse_unquote_plus(
self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
or self._og_search_title(webpage))

View File

@ -1,11 +1,14 @@
import itertools
import json
import time
import urllib.error
import urllib.parse
import urllib
from ..utils import (
ExtractorError,
parse_iso8601,
try_get,
)
from .common import InfoExtractor
from ..utils import ExtractorError, parse_iso8601, try_get
class NebulaBaseIE(InfoExtractor):

View File

@ -1,12 +1,18 @@
import itertools
import re
from hashlib import md5
from base64 import b64encode
from datetime import datetime
from hashlib import md5
import re
from .common import InfoExtractor
from ..compat import compat_str, compat_urllib_parse_urlencode
from ..utils import float_or_none, sanitized_Request
from ..compat import (
compat_urllib_parse_urlencode,
compat_str,
compat_itertools_count,
)
from ..utils import (
sanitized_Request,
float_or_none,
)
class NetEaseMusicBaseIE(InfoExtractor):
@ -443,7 +449,7 @@ class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE):
name = None
desc = None
entries = []
for offset in itertools.count(start=0, step=self._PAGE_SIZE):
for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE):
info = self.query_api(
'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d'
% (self._PAGE_SIZE, dj_id, offset),

View File

@ -3,17 +3,18 @@ import random
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError, compat_str
from ..compat import compat_str
from ..utils import (
ExtractorError,
compat_HTTPError,
determine_ext,
ExtractorError,
int_or_none,
parse_duration,
parse_iso8601,
str_or_none,
try_get,
url_or_none,
urljoin,
url_or_none,
)

View File

@ -1,9 +1,11 @@
import json
import re
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..compat import (
compat_HTTPError,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
@ -123,7 +125,7 @@ class PelotonIE(InfoExtractor):
is_live = False
if ride_data.get('content_format') == 'audio':
url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), urllib.parse.quote(token))
url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), compat_urllib_parse.quote(token))
formats = [{
'url': url,
'ext': 'm4a',
@ -136,9 +138,9 @@ class PelotonIE(InfoExtractor):
url = 'https://members.onepeloton.com/.netlify/functions/m3u8-proxy?displayLanguage=en&acceptedSubtitles=%s&url=%s?hdnea=%s' % (
','.join([re.sub('^([a-z]+)-([A-Z]+)$', r'\1', caption) for caption in ride_data['captions']]),
ride_data['vod_stream_url'],
urllib.parse.quote(urllib.parse.quote(token)))
compat_urllib_parse.quote(compat_urllib_parse.quote(token)))
elif ride_data.get('live_stream_url'):
url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), urllib.parse.quote(token))
url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), compat_urllib_parse.quote(token))
is_live = True
else:
raise ExtractorError('Missing video URL')

View File

@ -1,9 +1,14 @@
import re
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import ExtractorError, clean_html
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
clean_html,
ExtractorError,
)
class PlayvidIE(InfoExtractor):
@ -57,7 +62,7 @@ class PlayvidIE(InfoExtractor):
val = videovars_match.group(2)
if key == 'title':
video_title = urllib.parse.unquote_plus(val)
video_title = compat_urllib_parse_unquote_plus(val)
if key == 'duration':
try:
duration = int(val)

View File

@ -1,5 +1,8 @@
from .common import InfoExtractor
from ..compat import compat_b64decode
from ..compat import (
compat_b64decode,
compat_chr,
)
from ..utils import int_or_none
@ -47,7 +50,7 @@ class PopcorntimesIE(InfoExtractor):
c_ord += 13
if upper < c_ord:
c_ord -= 26
loc_b64 += chr(c_ord)
loc_b64 += compat_chr(c_ord)
video_url = compat_b64decode(loc_b64).decode('utf-8')

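The character loop in the Popcorntimes hunk above (add 13, wrap past the end of the alphabet) is plain ROT13 applied to a base64 payload before decoding. A hedged stdlib equivalent with an illustrative URL; digits and base64 punctuation pass through ROT13 unchanged:

import base64
import codecs

encoded = codecs.encode(base64.b64encode(b'https://example.com/video.mp4').decode(), 'rot13')
video_url = base64.b64decode(codecs.decode(encoded, 'rot13')).decode()
print(video_url)  # https://example.com/video.mp4
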
View File

@ -3,26 +3,29 @@ import itertools
import math
import operator
import re
import urllib.request
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_request,
)
from .openload import PhantomJSwrapper
from ..compat import compat_HTTPError, compat_str
from ..utils import (
NO_DEFAULT,
ExtractorError,
clean_html,
determine_ext,
ExtractorError,
format_field,
int_or_none,
merge_dicts,
NO_DEFAULT,
orderedSet,
remove_quotes,
remove_start,
str_to_int,
update_url_query,
url_or_none,
urlencode_postdata,
url_or_none,
)
@ -47,7 +50,7 @@ class PornHubBaseIE(InfoExtractor):
r'document\.location\.reload\(true\)')):
url_or_request = args[0]
url = (url_or_request.get_full_url()
if isinstance(url_or_request, urllib.request.Request)
if isinstance(url_or_request, compat_urllib_request.Request)
else url_or_request)
phantom = PhantomJSwrapper(self, required_version='2.0')
phantom.get(url, html=webpage)

View File

@ -1,6 +1,9 @@
from .prosiebensat1 import ProSiebenSat1BaseIE
from ..compat import compat_str
from ..utils import parse_duration, unified_strdate
from ..utils import (
unified_strdate,
parse_duration,
compat_str,
)
class Puls4IE(ProSiebenSat1BaseIE):

View File

@ -1,12 +1,14 @@
import base64
import io
import struct
from .common import InfoExtractor
from ..compat import compat_b64decode
from ..compat import (
compat_b64decode,
compat_struct_unpack,
)
from ..utils import (
ExtractorError,
determine_ext,
ExtractorError,
float_or_none,
qualities,
remove_end,
@ -71,7 +73,7 @@ class RTVEALaCartaIE(InfoExtractor):
def _decrypt_url(png):
encrypted_data = io.BytesIO(compat_b64decode(png)[8:])
while True:
length = struct.unpack('!I', encrypted_data.read(4))[0]
length = compat_struct_unpack('!I', encrypted_data.read(4))[0]
chunk_type = encrypted_data.read(4)
if chunk_type == b'IEND':
break

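The _decrypt_url loop above is walking PNG chunks: each chunk is a 4-byte big-endian length, a 4-byte type, the payload, and a trailing CRC, ending at IEND. A hedged standalone walker over a synthetic PNG (the CRC fields here are fake, and the parser ignores them):

import io
import struct

def iter_png_chunks(data):
    stream = io.BytesIO(data[8:])  # skip the 8-byte PNG signature
    while True:
        length = struct.unpack('!I', stream.read(4))[0]
        chunk_type = stream.read(4)
        payload = stream.read(length)
        stream.read(4)  # CRC, ignored here
        yield chunk_type, payload
        if chunk_type == b'IEND':
            return

fake_png = (b'\x89PNG\r\n\x1a\n'
            + struct.pack('!I', 4) + b'tEXt' + b'demo' + b'\x00' * 4
            + struct.pack('!I', 0) + b'IEND' + b'\x00' * 4)
print(list(iter_png_chunks(fake_png)))  # [(b'tEXt', b'demo'), (b'IEND', b'')]
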
View File

@ -1,8 +1,11 @@
import urllib.request
from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import ExtractorError
from ..compat import (
compat_parse_qs,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class ScreencastIE(InfoExtractor):
@ -72,7 +75,7 @@ class ScreencastIE(InfoExtractor):
flash_vars_s = flash_vars_s.replace(',', '&')
if flash_vars_s:
flash_vars = compat_parse_qs(flash_vars_s)
video_url_raw = urllib.request.quote(
video_url_raw = compat_urllib_request.quote(
flash_vars['content'][0])
video_url = video_url_raw.replace('http%3A', 'http:')

View File

@ -1,13 +1,14 @@
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_b64decode
from ..compat import (
compat_b64decode,
compat_urllib_parse_unquote_plus,
)
from ..utils import (
KNOWN_EXTENSIONS,
ExtractorError,
determine_ext,
ExtractorError,
int_or_none,
js_to_json,
KNOWN_EXTENSIONS,
parse_filesize,
rot47,
url_or_none,
@ -129,7 +130,7 @@ class VivoIE(SharedBaseIE):
return stream_url
def decode_url(encoded_url):
return rot47(urllib.parse.unquote_plus(encoded_url))
return rot47(compat_urllib_parse_unquote_plus(encoded_url))
return decode_url(self._parse_json(
self._search_regex(

View File

@ -1,6 +1,6 @@
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
compat_str,
float_or_none,
int_or_none,
smuggle_url,

View File

@ -1,12 +1,16 @@
import re
import urllib.request
from .common import InfoExtractor
from ..compat import compat_HTTPError, compat_str, compat_urlparse
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
determine_ext,
extract_attributes,
ExtractorError,
float_or_none,
int_or_none,
js_to_json,
@ -151,7 +155,7 @@ class UdemyIE(InfoExtractor):
headers['X-Udemy-Bearer-Token'] = cookie.value
headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
if isinstance(url_or_request, urllib.request.Request):
if isinstance(url_or_request, compat_urllib_request.Request):
for header, value in headers.items():
url_or_request.add_header(header, value)
else:

View File

@ -1,7 +1,10 @@
import urllib.parse
from .common import InfoExtractor
from ..utils import unified_strdate
from ..compat import (
compat_urllib_parse,
)
from ..utils import (
unified_strdate,
)
class UrortIE(InfoExtractor):
@ -28,7 +31,7 @@ class UrortIE(InfoExtractor):
def _real_extract(self, url):
playlist_id = self._match_id(url)
fstr = urllib.parse.quote("InternalBandUrl eq '%s'" % playlist_id)
fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
songs = self._download_json(json_url, playlist_id)
entries = []

View File

@ -1,10 +1,8 @@
import random
import re
import string
import struct
from .common import InfoExtractor
from ..compat import compat_b64decode, compat_ord
from ..utils import (
ExtractorError,
int_or_none,
@ -16,6 +14,11 @@ from ..utils import (
xpath_element,
xpath_text,
)
from ..compat import (
compat_b64decode,
compat_ord,
compat_struct_pack,
)
class VideaIE(InfoExtractor):
@ -99,7 +102,7 @@ class VideaIE(InfoExtractor):
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
k = S[(S[i] + S[j]) % 256]
res += struct.pack('B', k ^ compat_ord(cipher_text[m]))
res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))
return res.decode()

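The swap-and-lookup loop in the Videa hunk above is the output stage (PRGA) of RC4. For reference, a hedged, self-contained RC4 — key scheduling plus keystream — with toy inputs; this is a generic sketch, not the extractor's exact code:

def rc4(key, data):
    S = list(range(256))
    j = 0
    for i in range(256):  # KSA: key-scheduling
        j = (j + S[i] + key[i % len(key)]) % 256
        S[i], S[j] = S[j], S[i]
    out, i, j = bytearray(), 0, 0
    for ch in data:       # PRGA: keystream generation, as in the hunk above
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(ch ^ S[(S[i] + S[j]) % 256])
    return bytes(out)

ct = rc4(b'toy key', b'plaintext')
print(rc4(b'toy key', ct))  # b'plaintext' -- RC4 is its own inverse
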
View File

@ -1,14 +1,17 @@
import base64
import json
import hashlib
import hmac
import json
import random
import string
import time
import urllib.parse
from .common import InfoExtractor
from ..compat import compat_HTTPError, compat_urllib_parse_urlencode
from ..compat import (
compat_HTTPError,
compat_urllib_parse_urlencode,
compat_urllib_parse,
)
from ..utils import (
ExtractorError,
float_or_none,
@ -43,12 +46,12 @@ class VRVBaseIE(InfoExtractor):
headers['Content-Type'] = 'application/json'
base_string = '&'.join([
'POST' if data else 'GET',
urllib.parse.quote(base_url, ''),
urllib.parse.quote(encoded_query, '')])
compat_urllib_parse.quote(base_url, ''),
compat_urllib_parse.quote(encoded_query, '')])
oauth_signature = base64.b64encode(hmac.new(
(self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
base_string.encode(), hashlib.sha1).digest()).decode()
encoded_query += '&oauth_signature=' + urllib.parse.quote(oauth_signature, '')
encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
try:
return self._download_json(
'?'.join([base_url, encoded_query]), video_id,

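A hedged sketch of the OAuth 1.0-style signing shown in the VRV hunk above: the HTTP method, the percent-encoded base URL and the percent-encoded query are joined with '&', then HMAC-SHA1-signed with consumer and token secrets. All keys, URLs and query values below are made up:

import base64
import hashlib
import hmac
import urllib.parse

def sign_request(method, base_url, encoded_query, consumer_secret, token_secret=''):
    base_string = '&'.join([
        method,
        urllib.parse.quote(base_url, ''),
        urllib.parse.quote(encoded_query, '')])
    digest = hmac.new(
        (consumer_secret + '&' + token_secret).encode('ascii'),
        base_string.encode(), hashlib.sha1).digest()
    return urllib.parse.quote(base64.b64encode(digest).decode(), '')

query = 'oauth_consumer_key=demo&oauth_nonce=12345'
print(query + '&oauth_signature=' + sign_request('GET', 'https://api.example.com/core', query, 's3cret'))
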
Some files were not shown because too many files have changed in this diff