Compare commits


No commits in common. "14f25df2b6233553e968df023430ca96c0b1df9f" and "379a4f161d4ad3e40932dcf5aca6e6fb9715ab28" have entirely different histories.

109 changed files with 779 additions and 738 deletions

View File

@@ -1,12 +1,9 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import yt_dlp
 BASH_COMPLETION_FILE = "completions/bash/yt-dlp"

View File

@@ -13,12 +13,10 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import urllib.parse
-import urllib.request
 from test.helper import gettestcases
+from yt_dlp.utils import compat_urllib_parse_urlparse, compat_urllib_request
 if len(sys.argv) > 1:
     METHOD = 'LIST'
     LIST = open(sys.argv[1]).read().decode('utf8').strip()
@@ -28,7 +26,7 @@ else:
 for test in gettestcases():
     if METHOD == 'EURISTIC':
         try:
-            webpage = urllib.request.urlopen(test['url'], timeout=10).read()
+            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
         except Exception:
             print('\nFail: {}'.format(test['name']))
             continue
@@ -38,7 +36,7 @@ for test in gettestcases():
         RESULT = 'porn' in webpage.lower()
     elif METHOD == 'LIST':
-        domain = urllib.parse.urlparse(test['url']).netloc
+        domain = compat_urllib_parse_urlparse(test['url']).netloc
         if not domain:
             print('\nFail: {}'.format(test['name']))
             continue
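Most hunks in this comparison follow the same pattern as the file above: the 14f25df2 side calls Python 3 stdlib names directly, while the 379a4f16 side routes the same calls through yt-dlp's compat layer. On Python 3 the compat names are plain aliases of the stdlib objects (the compat-module hunks near the end of this comparison spell out the mapping), so the two sides are behaviorally equivalent. A minimal sketch of that equivalence:

    import urllib.parse
    import urllib.request

    # The compat names are direct aliases of the stdlib objects
    # (see the compat-module hunks at the end of this comparison):
    compat_urllib_parse_urlparse = urllib.parse.urlparse
    compat_urllib_request = urllib.request

    assert compat_urllib_parse_urlparse('https://example.com/a').netloc == 'example.com'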

View File

@@ -1,14 +1,10 @@
 #!/usr/bin/env python3
+import optparse
-# Allow direct execution
 import os
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import optparse
 import yt_dlp
 from yt_dlp.utils import shell_quote

View File

@@ -1,15 +1,11 @@
 #!/usr/bin/env python3
+import codecs
-# Allow direct execution
 import os
+import subprocess
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import codecs
-import subprocess
 from yt_dlp.aes import aes_encrypt, key_expansion
 from yt_dlp.utils import intlist_to_bytes

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 import optparse
 import re

View File

@@ -1,12 +1,4 @@
 #!/usr/bin/env python3
-# Allow direct execution
-import os
-import sys
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import optparse
@@ -15,7 +7,7 @@ def read(fname):
         return f.read()
-# Get the version without importing the package
+# Get the version from yt_dlp/version.py without importing the package
 def read_version(fname):
     exec(compile(read(fname), fname, 'exec'))
     return locals()['__version__']
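The read_version trick above works because exec() with no explicit namespaces writes into the enclosing function's locals() dict, so __version__ can be pulled back out without importing the package (and thus without needing its dependencies installed). A rough standalone illustration, with a hypothetical file path:

    # Run version.py in this function's local namespace and read
    # __version__ back out of locals(); the package is never imported.
    def read_version(fname='yt_dlp/version.py'):
        with open(fname, encoding='utf-8') as f:
            exec(compile(f.read(), fname, 'exec'))
        return locals()['__version__']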

View File

@@ -1,15 +1,12 @@
 #!/usr/bin/env python3
+import optparse
-# Allow direct execution
 import os
 import sys
+from inspect import getsource
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import optparse
-from inspect import getsource
 NO_ATTR = object()
 STATIC_CLASS_PROPERTIES = ['IE_NAME', 'IE_DESC', 'SEARCH_KEY', '_WORKING', '_NETRC_MACHINE', 'age_limit']
 CLASS_METHODS = [

View File

@@ -1,11 +1,7 @@
 #!/usr/bin/env python3
-"""
-yt-dlp --help | make_readme.py
-This must be run in a console of correct width
-"""
+# yt-dlp --help | make_readme.py
+# This must be run in a console of correct width
 import functools
 import re
 import sys

View File

@@ -1,14 +1,10 @@
 #!/usr/bin/env python3
+import optparse
-# Allow direct execution
 import os
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import optparse
 from yt_dlp.extractor import list_extractor_classes

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 import optparse
 import os.path
 import re
@@ -24,7 +23,7 @@ yt\-dlp \- A youtube-dl fork with additional features and patches
 def main():
     parser = optparse.OptionParser(usage='%prog OUTFILE.md')
-    _, args = parser.parse_args()
+    options, args = parser.parse_args()
     if len(args) != 1:
         parser.error('Expected an output filename')

View File

@@ -1,15 +1,12 @@
 #!/usr/bin/env python3
+import json
-# Allow direct execution
 import os
+import re
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from yt_dlp.compat import compat_urllib_request
-import json
-import re
-import urllib.request
 # usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
 # version can be either 0-aligned (yt-dlp version) or normalized (PyPl version)
@@ -18,7 +15,7 @@ filename, version = sys.argv[1:]
 normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
-pypi_release = json.loads(urllib.request.urlopen(
+pypi_release = json.loads(compat_urllib_request.urlopen(
     'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
 ).read().decode())
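For context, the normalization in this hunk (unchanged on both sides) strips zero-padding so a date-style yt-dlp version matches its PyPI form; a quick illustration with a made-up version string:

    version = '2022.05.18'  # hypothetical 0-aligned yt-dlp version
    normalized = '.'.join(str(int(x)) for x in version.split('.'))
    assert normalized == '2022.5.18'  # the PyPI-normalized form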

View File

@@ -1,12 +1,4 @@
 #!/usr/bin/env python3
-# Allow direct execution
-import os
-import sys
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import subprocess
 import sys
 from datetime import datetime

View File

@@ -1,12 +1,9 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
 import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import yt_dlp
 ZSH_COMPLETION_FILE = "completions/zsh/_yt-dlp"

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 import os
 import platform
 import sys

View File

@@ -37,5 +37,3 @@ line_length = 80
 reverse_relative = true
 ensure_newline_before_comments = true
 include_trailing_comma = true
-known_first_party =
-    test

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 import os.path
 import sys
 import warnings

View File

@@ -9,7 +9,7 @@ import types
 import yt_dlp.extractor
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_os_name
+from yt_dlp.compat import compat_os_name, compat_str
 from yt_dlp.utils import preferredencoding, write_string
 if 'pytest' in sys.modules:
@@ -96,29 +96,29 @@ md5 = lambda s: hashlib.md5(s.encode()).hexdigest()
 def expect_value(self, got, expected, field):
-    if isinstance(expected, str) and expected.startswith('re:'):
+    if isinstance(expected, compat_str) and expected.startswith('re:'):
         match_str = expected[len('re:'):]
         match_rex = re.compile(match_str)
         self.assertTrue(
-            isinstance(got, str),
-            f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
+            isinstance(got, compat_str),
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             match_rex.match(got),
             f'field {field} (value: {got!r}) should match {match_str!r}')
-    elif isinstance(expected, str) and expected.startswith('startswith:'):
+    elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
         start_str = expected[len('startswith:'):]
         self.assertTrue(
-            isinstance(got, str),
-            f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
+            isinstance(got, compat_str),
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             got.startswith(start_str),
             f'field {field} (value: {got!r}) should start with {start_str!r}')
-    elif isinstance(expected, str) and expected.startswith('contains:'):
+    elif isinstance(expected, compat_str) and expected.startswith('contains:'):
         contains_str = expected[len('contains:'):]
         self.assertTrue(
-            isinstance(got, str),
-            f'Expected a {str.__name__} object, but got {type(got).__name__} for field {field}')
+            isinstance(got, compat_str),
+            f'Expected a {compat_str.__name__} object, but got {type(got).__name__} for field {field}')
         self.assertTrue(
             contains_str in got,
             f'field {field} (value: {got!r}) should contain {contains_str!r}')
@@ -142,12 +142,12 @@ def expect_value(self, got, expected, field):
                     index, field, type_expected, type_got))
             expect_value(self, item_got, item_expected, field)
     else:
-        if isinstance(expected, str) and expected.startswith('md5:'):
+        if isinstance(expected, compat_str) and expected.startswith('md5:'):
             self.assertTrue(
-                isinstance(got, str),
+                isinstance(got, compat_str),
                 f'Expected field {field} to be a unicode object, but got value {got!r} of type {type(got)!r}')
             got = 'md5:' + md5(got)
-        elif isinstance(expected, str) and re.match(r'^(?:min|max)?count:\d+', expected):
+        elif isinstance(expected, compat_str) and re.match(r'^(?:min|max)?count:\d+', expected):
             self.assertTrue(
                 isinstance(got, (list, dict)),
                 f'Expected field {field} to be a list or a dict, but it is of type {type(got).__name__}')
@@ -236,7 +236,7 @@ def expect_info_dict(self, got_dict, expected_dict):
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
         def _repr(v):
-            if isinstance(v, str):
+            if isinstance(v, compat_str):
                 return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
             elif isinstance(v, type):
                 return v.__name__
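As the assertions in these hunks show (identically on both sides of the change), expect_value dispatches on a string prefix in the expected value. A hedged summary with made-up values:

    # Illustrative (made-up) expected values and the checks they trigger:
    expected = {
        'title': 're:^Episode \\d+$',      # regex must match the value
        'uploader': 'startswith:yt',       # value must start with 'yt'
        'description': 'contains:teapot',  # value must contain the substring
        'formats': 'mincount:2',           # list/dict needs at least 2 entries
    }
    # 'md5:<hex>' expectations hash the got string and compare digests.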

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,12 +6,10 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import http.server
 import threading
 from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
-from yt_dlp.compat import compat_etree_fromstring
+from yt_dlp.compat import compat_etree_fromstring, compat_http_server
 from yt_dlp.extractor import YoutubeIE, get_info_extractor
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.utils import (
@@ -26,7 +23,7 @@ TEAPOT_RESPONSE_STATUS = 418
 TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
-class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler):
+class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -1658,7 +1655,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
         # or the underlying `_download_webpage_handle` returning no content
         # when a response matches `expected_status`.
-        httpd = http.server.HTTPServer(
+        httpd = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), InfoExtractorTestRequestHandler)
         port = http_server_port(httpd)
         server_thread = threading.Thread(target=httpd.serve_forever)

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,14 +6,17 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import copy
 import json
-import urllib.error
 from test.helper import FakeYDL, assertRegexpMatches
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_os_name
+from yt_dlp.compat import (
+    compat_os_name,
+    compat_setenv,
+    compat_str,
+    compat_urllib_error,
+)
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.postprocessor.common import PostProcessor
@@ -839,14 +841,14 @@ class TestYoutubeDL(unittest.TestCase):
         # test('%(foo|)s', ('', '_')) # fixme
         # Environment variable expansion for prepare_filename
-        os.environ['__yt_dlp_var'] = 'expanded'
+        compat_setenv('__yt_dlp_var', 'expanded')
         envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
         test(envvar, (envvar, 'expanded'))
         if compat_os_name == 'nt':
             test('%s%', ('%s%', '%s%'))
-            os.environ['s'] = 'expanded'
+            compat_setenv('s', 'expanded')
             test('%s%', ('%s%', 'expanded'))  # %s% should be expanded before escaping %s
-            os.environ['(test)s'] = 'expanded'
+            compat_setenv('(test)s', 'expanded')
             test('%(test)s%', ('NA%', 'expanded'))  # Environment should take priority over template
         # Path expansion and escaping
@@ -1099,7 +1101,7 @@ class TestYoutubeDL(unittest.TestCase):
     def test_urlopen_no_file_protocol(self):
         # see https://github.com/ytdl-org/youtube-dl/issues/8227
         ydl = YDL()
-        self.assertRaises(urllib.error.URLError, ydl.urlopen, 'file:///etc/passwd')
+        self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
     def test_do_not_override_ie_key_in_url_transparent(self):
         ydl = YDL()
@@ -1185,7 +1187,7 @@ class TestYoutubeDL(unittest.TestCase):
             def _entries(self):
                 for n in range(3):
-                    video_id = str(n)
+                    video_id = compat_str(n)
                     yield {
                         '_type': 'url_transparent',
                         'ie_key': VideoIE.ie_key(),

View File

@@ -1,16 +1,12 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
+import re
 import sys
+import tempfile
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import re
-import tempfile
 from yt_dlp.utils import YoutubeDLCookieJar

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,7 +6,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import base64
 from yt_dlp.aes import (

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,8 +6,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import is_download_test, try_rm
 from yt_dlp import YoutubeDL

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # Allow direct execution
+import collections
 import os
 import sys
 import unittest
@@ -8,9 +8,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import collections
 from test.helper import gettestcases
 from yt_dlp.extractor import FacebookIE, YoutubeIE, gen_extractors

View File

@@ -1,16 +1,15 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
+import shutil
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import shutil
 from test.helper import FakeYDL
 from yt_dlp.cache import Cache

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -8,14 +7,16 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import struct
-import urllib.parse
 from yt_dlp import compat
 from yt_dlp.compat import (
     compat_etree_fromstring,
     compat_expanduser,
+    compat_getenv,
+    compat_setenv,
+    compat_str,
+    compat_struct_unpack,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
 )
@@ -25,19 +26,28 @@ class TestCompat(unittest.TestCase):
         with self.assertWarns(DeprecationWarning):
             compat.compat_basestring
-        with self.assertWarns(DeprecationWarning):
-            compat.WINDOWS_VT_MODE
         compat.asyncio.events  # Must not raise error
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        compat_setenv('yt_dlp_COMPAT_GETENV', test_str)
+        self.assertEqual(compat_getenv('yt_dlp_COMPAT_GETENV'), test_str)
+    def test_compat_setenv(self):
+        test_var = 'yt_dlp_COMPAT_SETENV'
+        test_str = 'тест'
+        compat_setenv(test_var, test_str)
+        compat_getenv(test_var)
+        self.assertEqual(compat_getenv(test_var), test_str)
     def test_compat_expanduser(self):
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            os.environ['HOME'] = test_str
+            compat_setenv('HOME', test_str)
             self.assertEqual(compat_expanduser('~'), test_str)
         finally:
-            os.environ['HOME'] = old_home or ''
+            compat_setenv('HOME', old_home or '')
     def test_compat_urllib_parse_unquote(self):
         self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
@@ -59,8 +69,8 @@ class TestCompat(unittest.TestCase):
             '''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''')
     def test_compat_urllib_parse_unquote_plus(self):
-        self.assertEqual(urllib.parse.unquote_plus('abc%20def'), 'abc def')
-        self.assertEqual(urllib.parse.unquote_plus('%7e/abc+def'), '~/abc def')
+        self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
+        self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
     def test_compat_urllib_parse_urlencode(self):
         self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
@@ -81,11 +91,11 @@ class TestCompat(unittest.TestCase):
         </root>
         '''
         doc = compat_etree_fromstring(xml.encode())
-        self.assertTrue(isinstance(doc.attrib['foo'], str))
-        self.assertTrue(isinstance(doc.attrib['spam'], str))
-        self.assertTrue(isinstance(doc.find('normal').text, str))
-        self.assertTrue(isinstance(doc.find('chinese').text, str))
-        self.assertTrue(isinstance(doc.find('foo/bar').text, str))
+        self.assertTrue(isinstance(doc.attrib['foo'], compat_str))
+        self.assertTrue(isinstance(doc.attrib['spam'], compat_str))
+        self.assertTrue(isinstance(doc.find('normal').text, compat_str))
+        self.assertTrue(isinstance(doc.find('chinese').text, compat_str))
+        self.assertTrue(isinstance(doc.find('foo/bar').text, compat_str))
     def test_compat_etree_fromstring_doctype(self):
         xml = '''<?xml version="1.0"?>
@@ -94,7 +104,7 @@ class TestCompat(unittest.TestCase):
             compat_etree_fromstring(xml)
     def test_struct_unpack(self):
-        self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
+        self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
 if __name__ == '__main__':

View File

@@ -1,19 +1,14 @@
 #!/usr/bin/env python3
 # Allow direct execution
+import hashlib
+import json
 import os
+import socket
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import hashlib
-import http.client
-import json
-import socket
-import urllib.error
 from test.helper import (
     assertGreaterEqual,
     expect_info_dict,
@@ -25,7 +20,12 @@ from test.helper import (
     try_rm,
 )
-import yt_dlp.YoutubeDL  # isort: split
+import yt_dlp.YoutubeDL
+from yt_dlp.compat import (
+    compat_http_client,
+    compat_HTTPError,
+    compat_urllib_error,
+)
 from yt_dlp.extractor import get_info_extractor
 from yt_dlp.utils import (
     DownloadError,
@@ -167,7 +167,7 @@ def generator(test_case, tname):
                     force_generic_extractor=params.get('force_generic_extractor', False))
             except (DownloadError, ExtractorError) as err:
                 # Check if the exception is not a network related one
-                if not err.exc_info[0] in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine) or (err.exc_info[0] == urllib.error.HTTPError and err.exc_info[1].code == 503):
+                if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                     raise
                 if try_num == RETRIES:

View File

@@ -1,19 +1,17 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
+import re
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import http.server
-import re
 import threading
 from test.helper import http_server_port, try_rm
 from yt_dlp import YoutubeDL
+from yt_dlp.compat import compat_http_server
 from yt_dlp.downloader.http import HttpFD
 from yt_dlp.utils import encodeFilename
@@ -23,7 +21,7 @@ TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 TEST_SIZE = 10 * 1024
-class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -80,7 +78,7 @@ class FakeLogger:
 class TestHttpFD(unittest.TestCase):
     def setUp(self):
-        self.httpd = http.server.HTTPServer(
+        self.httpd = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)

View File

@@ -1,16 +1,12 @@
 #!/usr/bin/env python3
+import contextlib
-# Allow direct execution
 import os
+import subprocess
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import contextlib
-import subprocess
 from yt_dlp.utils import encodeArgument
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,19 +6,17 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import http.server
 import ssl
 import threading
-import urllib.request
 from test.helper import http_server_port
 from yt_dlp import YoutubeDL
+from yt_dlp.compat import compat_http_server, compat_urllib_request
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))
-class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -56,7 +53,7 @@ class FakeLogger:
 class TestHTTP(unittest.TestCase):
     def setUp(self):
-        self.httpd = http.server.HTTPServer(
+        self.httpd = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)
@@ -67,7 +64,7 @@ class TestHTTP(unittest.TestCase):
 class TestHTTPS(unittest.TestCase):
     def setUp(self):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
-        self.httpd = http.server.HTTPServer(
+        self.httpd = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.load_cert_chain(certfn, None)
@@ -93,7 +90,7 @@ class TestClientCert(unittest.TestCase):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
         self.certdir = os.path.join(TEST_DIR, 'testdata', 'certificate')
         cacertfn = os.path.join(self.certdir, 'ca.crt')
-        self.httpd = http.server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
+        self.httpd = compat_http_server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.verify_mode = ssl.CERT_REQUIRED
         sslctx.load_verify_locations(cafile=cacertfn)
@@ -133,7 +130,7 @@ class TestClientCert(unittest.TestCase):
 def _build_proxy_handler(name):
-    class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
+    class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
         proxy_name = name
         def log_message(self, format, *args):
@@ -149,14 +146,14 @@ def _build_proxy_handler(name):
 class TestProxy(unittest.TestCase):
     def setUp(self):
-        self.proxy = http.server.HTTPServer(
+        self.proxy = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('normal'))
         self.port = http_server_port(self.proxy)
         self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
         self.proxy_thread.daemon = True
         self.proxy_thread.start()
-        self.geo_proxy = http.server.HTTPServer(
+        self.geo_proxy = compat_http_server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('geo'))
         self.geo_port = http_server_port(self.geo_proxy)
         self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
@@ -173,7 +170,7 @@ class TestProxy(unittest.TestCase):
         response = ydl.urlopen(url).read().decode()
         self.assertEqual(response, f'normal: {url}')
-        req = urllib.request.Request(url)
+        req = compat_urllib_request.Request(url)
         req.add_header('Ytdl-request-proxy', geo_proxy)
         response = ydl.urlopen(req).read().decode()
         self.assertEqual(response, f'geo: {url}')

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,8 +6,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import FakeYDL, is_download_test
 from yt_dlp.extractor import IqiyiIE

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,7 +6,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from yt_dlp.jsinterp import JSInterpreter

View File

@@ -1,6 +1,3 @@
-#!/usr/bin/env python3
-# Allow direct execution
 import os
 import sys
 import unittest

View File

@@ -1,15 +1,11 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
+import subprocess
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import subprocess
 from test.helper import is_download_test, try_rm
 root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

View File

@@ -1,15 +1,13 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import get_params, is_download_test, try_rm
-import yt_dlp.YoutubeDL  # isort: split
+import yt_dlp.YoutubeDL
 from yt_dlp.utils import DownloadError

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,7 +6,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from yt_dlp import YoutubeDL
 from yt_dlp.compat import compat_shlex_quote
 from yt_dlp.postprocessor import (

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,13 +6,12 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import random
 import subprocess
-import urllib.request
 from test.helper import FakeYDL, get_params, is_download_test
+from yt_dlp.compat import compat_str, compat_urllib_request
 @is_download_test
 class TestMultipleSocks(unittest.TestCase):
@@ -53,7 +51,7 @@ class TestMultipleSocks(unittest.TestCase):
         if params is None:
             return
         ydl = FakeYDL()
-        req = urllib.request.Request('http://yt-dl.org/ip')
+        req = compat_urllib_request.Request('http://yt-dl.org/ip')
         req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),
@@ -64,7 +62,7 @@ class TestMultipleSocks(unittest.TestCase):
         if params is None:
             return
         ydl = FakeYDL()
-        req = urllib.request.Request('https://yt-dl.org/ip')
+        req = compat_urllib_request.Request('https://yt-dl.org/ip')
         req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),
@@ -101,13 +99,13 @@ class TestSocks(unittest.TestCase):
         return ydl.urlopen('http://yt-dl.org/ip').read().decode()
     def test_socks4(self):
-        self.assertTrue(isinstance(self._get_ip('socks4'), str))
+        self.assertTrue(isinstance(self._get_ip('socks4'), compat_str))
     def test_socks4a(self):
-        self.assertTrue(isinstance(self._get_ip('socks4a'), str))
+        self.assertTrue(isinstance(self._get_ip('socks4a'), compat_str))
     def test_socks5(self):
-        self.assertTrue(isinstance(self._get_ip('socks5'), str))
+        self.assertTrue(isinstance(self._get_ip('socks5'), compat_str))
 if __name__ == '__main__':

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,8 +6,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import FakeYDL, is_download_test, md5
 from yt_dlp.extractor import (
     NPOIE,
     NRKTVIE,

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 # Allow direct execution
+import contextlib
 import os
 import sys
 import unittest
@@ -8,16 +8,19 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import contextlib
+# Various small unit tests
 import io
 import itertools
 import json
 import xml.etree.ElementTree
 from yt_dlp.compat import (
+    compat_chr,
     compat_etree_fromstring,
+    compat_getenv,
     compat_HTMLParseError,
     compat_os_name,
+    compat_setenv,
 )
 from yt_dlp.utils import (
     Config,
@@ -263,20 +266,20 @@ class TestUtil(unittest.TestCase):
         def env(var):
             return f'%{var}%' if sys.platform == 'win32' else f'${var}'
-        os.environ['yt_dlp_EXPATH_PATH'] = 'expanded'
+        compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
         self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            os.environ['HOME'] = test_str
-            self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
-            self.assertEqual(expand_path('~'), os.getenv('HOME'))
+            compat_setenv('HOME', test_str)
+            self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
+            self.assertEqual(expand_path('~'), compat_getenv('HOME'))
             self.assertEqual(
                 expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
-                '%s/expanded' % os.getenv('HOME'))
+                '%s/expanded' % compat_getenv('HOME'))
         finally:
-            os.environ['HOME'] = old_home or ''
+            compat_setenv('HOME', old_home or '')
     def test_prepend_extension(self):
         self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
@@ -1125,7 +1128,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(extract_attributes('<e x="décompose&#769;">'), {'x': 'décompose\u0301'})
         # "Narrow" Python builds don't support unicode code points outside BMP.
         try:
-            chr(0x10000)
+            compat_chr(0x10000)
             supports_outside_bmp = True
         except ValueError:
             supports_outside_bmp = False

View File

@@ -1,15 +1,11 @@
 #!/usr/bin/env python3
-# Allow direct execution
 import os
+import subprocess
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import subprocess
 rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,12 +6,11 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 import xml.etree.ElementTree
+from test.helper import get_params, is_download_test, try_rm
 import yt_dlp.extractor
 import yt_dlp.YoutubeDL
-from test.helper import get_params, is_download_test, try_rm
 class YoutubeDL(yt_dlp.YoutubeDL):

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys
@@ -7,8 +6,8 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 from test.helper import FakeYDL, is_download_test
 from yt_dlp.extractor import YoutubeIE, YoutubeTabIE

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
 import sys

View File

@@ -1,19 +1,18 @@
 #!/usr/bin/env python3
 # Allow direct execution
+import contextlib
 import os
 import sys
 import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-import contextlib
 import re
 import string
 import urllib.request
 from test.helper import FakeYDL, is_download_test
+from yt_dlp.compat import compat_str
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.jsinterp import JSInterpreter
@@ -158,7 +157,7 @@ def t_factory(name, sig_func, url_pattern):
     def signature(jscode, sig_input):
         func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
         src_sig = (
-            str(string.printable[:sig_input])
+            compat_str(string.printable[:sig_input])
             if isinstance(sig_input, int) else sig_input)
         return func(src_sig)

View File

@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import collections
 import contextlib
 import datetime
@@ -25,8 +26,15 @@ import urllib.request
 from string import ascii_letters
 from .cache import Cache
-from .compat import HAS_LEGACY as compat_has_legacy
-from .compat import compat_os_name, compat_shlex_quote
+from .compat import (
+    HAS_LEGACY as compat_has_legacy,
+    compat_get_terminal_size,
+    compat_os_name,
+    compat_shlex_quote,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_request,
+)
 from .cookies import load_cookies
 from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
 from .downloader.rtmp import rtmpdump_version
@@ -636,7 +644,7 @@ class YoutubeDL:
             try:
                 import pty
                 master, slave = pty.openpty()
-                width = shutil.get_terminal_size().columns
+                width = compat_get_terminal_size().columns
                 width_args = [] if width is None else ['-w', str(width)]
                 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                 try:
@@ -791,7 +799,7 @@ class YoutubeDL:
             return message
         assert hasattr(self, '_output_process')
-        assert isinstance(message, str)
+        assert isinstance(message, compat_str)
        line_count = message.count('\n') + 1
        self._output_process.stdin.write((message + '\n').encode())
        self._output_process.stdin.flush()
@@ -827,7 +835,7 @@ class YoutubeDL:
     def to_stderr(self, message, only_once=False):
         """Print message to stderr"""
-        assert isinstance(message, str)
+        assert isinstance(message, compat_str)
         if self.params.get('logger'):
             self.params['logger'].error(message)
         else:
@@ -1562,7 +1570,7 @@ class YoutubeDL:
         additional_urls = (ie_result or {}).get('additional_urls')
         if additional_urls:
             # TODO: Improve MetadataParserPP to allow setting a list
-            if isinstance(additional_urls, str):
+            if isinstance(additional_urls, compat_str):
                 additional_urls = [additional_urls]
             self.to_screen(
                 '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
@@ -2355,10 +2363,10 @@ class YoutubeDL:
         def sanitize_string_field(info, string_field):
             field = info.get(string_field)
-            if field is None or isinstance(field, str):
+            if field is None or isinstance(field, compat_str):
                 return
             report_force_conversion(string_field, 'a string', 'string')
-            info[string_field] = str(field)
+            info[string_field] = compat_str(field)
         def sanitize_numeric_fields(info):
             for numeric_field in self._NUMERIC_FIELDS:
@@ -2461,7 +2469,7 @@ class YoutubeDL:
             sanitize_numeric_fields(format)
             format['url'] = sanitize_url(format['url'])
             if not format.get('format_id'):
-                format['format_id'] = str(i)
+                format['format_id'] = compat_str(i)
             else:
                 # Sanitize format_id from characters used in format selector expression
                 format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
@@ -2611,7 +2619,7 @@ class YoutubeDL:
             if chapter or offset:
                 new_info.update({
                     'section_start': offset + chapter.get('start_time', 0),
-                    'section_end': offset + min(chapter.get('end_time', duration), duration),
+                    'section_end': offset + min(chapter.get('end_time', 0), duration),
                     'section_title': chapter.get('title'),
                     'section_number': chapter.get('index'),
                 })
@@ -3716,7 +3724,7 @@ class YoutubeDL:
             else:
                 proxies = {'http': opts_proxy, 'https': opts_proxy}
         else:
-            proxies = urllib.request.getproxies()
+            proxies = compat_urllib_request.getproxies()
             # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
             if 'http' in proxies and 'https' not in proxies:
                 proxies['https'] = proxies['http']
@@ -3732,13 +3740,13 @@ class YoutubeDL:
         # default FileHandler and allows us to disable the file protocol, which
         # can be used for malicious purposes (see
         # https://github.com/ytdl-org/youtube-dl/issues/8227)
-        file_handler = urllib.request.FileHandler()
+        file_handler = compat_urllib_request.FileHandler()
         def file_open(*args, **kwargs):
-            raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
+            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
         file_handler.file_open = file_open
-        opener = urllib.request.build_opener(
+        opener = compat_urllib_request.build_opener(
             proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
         # Delete the default user-agent header, which would otherwise apply in
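Only the urllib spellings change in the last hunk above; the technique itself is identical on both sides: override FileHandler.file_open so the opener rejects file:// URLs. A minimal standalone sketch using the stdlib names:

    import urllib.error
    import urllib.request

    # Build an opener whose FileHandler refuses to open file:// URLs
    file_handler = urllib.request.FileHandler()

    def file_open(*args, **kwargs):
        raise urllib.error.URLError('file:// scheme is disabled')

    file_handler.file_open = file_open
    opener = urllib.request.build_opener(file_handler)
    # opener.open('file:///etc/passwd') would now raise URLError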

View File

@@ -1,15 +1,15 @@
+#!/usr/bin/env python3
 f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp'  # noqa: F541
 __license__ = 'Public Domain'
-import getpass
 import itertools
 import optparse
 import os
 import re
 import sys
-from .compat import compat_shlex_quote
+from .compat import compat_getpass, compat_shlex_quote
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader import FileDownloader
 from .downloader.external import get_external_downloader
@@ -531,9 +531,9 @@ def validate_options(opts):
     # Ask for passwords
     if opts.username is not None and opts.password is None:
-        opts.password = getpass.getpass('Type account password and press [Return]: ')
+        opts.password = compat_getpass('Type account password and press [Return]: ')
     if opts.ap_username is not None and opts.ap_password is None:
-        opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')
+        opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
     return warnings, deprecation_warnings

View File

@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
 # Execute with
 # $ python -m yt_dlp

View File

@@ -1,7 +1,6 @@
-import base64
 from math import ceil
-from .compat import compat_ord
+from .compat import compat_b64decode, compat_ord
 from .dependencies import Cryptodome_AES
 from .utils import bytes_to_intlist, intlist_to_bytes
@@ -265,7 +264,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
     """
     NONCE_LENGTH_BYTES = 8
-    data = bytes_to_intlist(base64.b64decode(data))
+    data = bytes_to_intlist(compat_b64decode(data))
     password = bytes_to_intlist(password.encode())
     key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password))

View File

@@ -6,6 +6,7 @@ import re
 import shutil
 import traceback
+from .compat import compat_getenv
 from .utils import expand_path, write_json_file
@@ -16,7 +17,7 @@ class Cache:
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
+            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'yt-dlp')
         return expand_path(res)
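Both spellings resolve the cache root the same way, honoring the XDG convention before falling back to ~/.cache; a quick illustration of that fallback (example path only):

    import os

    cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
    res = os.path.join(cache_root, 'yt-dlp')
    print(os.path.expanduser(res))  # e.g. /home/user/.cache/yt-dlp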

View File

@@ -7,6 +7,7 @@ from . import re
 from ._deprecated import *  # noqa: F401, F403
 from .compat_utils import passthrough_module
+# XXX: Implement this the same way as other DeprecationWarnings without circular import
 try:
     passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(

View File

@@ -1,16 +1,52 @@
 """Deprecated - New code should avoid these"""
 import base64
-import urllib.error
-import urllib.parse
+import getpass
+import html
+import html.parser
+import http
+import http.client
+import http.cookiejar
+import http.cookies
+import http.server
+import itertools
+import os
+import shutil
+import struct
+import tokenize
+import urllib
-compat_str = str
 compat_b64decode = base64.b64decode
+compat_chr = chr
+compat_cookiejar = http.cookiejar
+compat_cookiejar_Cookie = http.cookiejar.Cookie
+compat_cookies_SimpleCookie = http.cookies.SimpleCookie
+compat_get_terminal_size = shutil.get_terminal_size
+compat_getenv = os.getenv
+compat_getpass = getpass.getpass
+compat_html_entities = html.entities
+compat_html_entities_html5 = html.entities.html5
+compat_HTMLParser = html.parser.HTMLParser
+compat_http_client = http.client
+compat_http_server = http.server
 compat_HTTPError = urllib.error.HTTPError
-compat_urlparse = urllib.parse
+compat_itertools_count = itertools.count
 compat_parse_qs = urllib.parse.parse_qs
+compat_str = str
+compat_struct_pack = struct.pack
+compat_struct_unpack = struct.unpack
+compat_tokenize_tokenize = tokenize.tokenize
+compat_urllib_error = urllib.error
 compat_urllib_parse_unquote = urllib.parse.unquote
+compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
 compat_urllib_parse_urlencode = urllib.parse.urlencode
 compat_urllib_parse_urlparse = urllib.parse.urlparse
+compat_urllib_request = urllib.request
+compat_urlparse = compat_urllib_parse = urllib.parse
+def compat_setenv(key, value, env=os.environ):
+    env[key] = value
+__all__ = [x for x in globals() if x.startswith('compat_')]

View File

@@ -2,27 +2,18 @@
 import collections
 import ctypes
-import getpass
-import html.entities
-import html.parser
+import http
 import http.client
 import http.cookiejar
 import http.cookies
 import http.server
-import itertools
-import os
 import shlex
-import shutil
 import socket
 import struct
-import tokenize
-import urllib.error
-import urllib.parse
-import urllib.request
+import urllib
 import xml.etree.ElementTree as etree
 from subprocess import DEVNULL
-from .compat_utils import passthrough_module  # isort: split
 from .asyncio import run as compat_asyncio_run  # noqa: F401
 from .re import Pattern as compat_Pattern  # noqa: F401
 from .re import match as compat_Match  # noqa: F401
@@ -30,8 +21,6 @@ from ..dependencies import Cryptodome_AES as compat_pycrypto_AES  # noqa: F401
 from ..dependencies import brotli as compat_brotli  # noqa: F401
 from ..dependencies import websockets as compat_websockets  # noqa: F401
-passthrough_module(__name__, '...utils', ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'))
 # compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
 # will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
@@ -39,17 +28,12 @@ def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
     return ctypes.WINFUNCTYPE(*args, **kwargs)
-def compat_setenv(key, value, env=os.environ):
-    env[key] = value
 compat_basestring = str
 compat_collections_abc = collections.abc
 compat_cookies = http.cookies
 compat_etree_Element = etree.Element
 compat_etree_register_namespace = etree.register_namespace
 compat_filter = filter
-compat_getenv = os.getenv
 compat_input = input
 compat_integer_types = (int, )
 compat_kwargs = lambda kwargs: kwargs
@@ -65,28 +49,16 @@ compat_urllib_parse_quote_plus = urllib.parse.quote_plus
 compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
 compat_urllib_parse_urlunparse = urllib.parse.urlunparse
 compat_urllib_request_DataHandler = urllib.request.DataHandler
-compat_urllib_request = urllib.request
 compat_urllib_response = urllib.response
 compat_urlretrieve = urllib.request.urlretrieve
 compat_xml_parse_error = etree.ParseError
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
-compat_getpass = getpass.getpass
-compat_chr = chr
-compat_urllib_parse = urllib.parse
-compat_itertools_count = itertools.count
-compat_cookiejar = http.cookiejar
-compat_cookiejar_Cookie = http.cookiejar.Cookie
-compat_cookies_SimpleCookie = http.cookies.SimpleCookie
-compat_get_terminal_size = shutil.get_terminal_size
-compat_html_entities = html.entities
-compat_html_entities_html5 = html.entities.html5
-compat_tokenize_tokenize = tokenize.tokenize
-compat_HTMLParser = html.parser.HTMLParser
-compat_http_client = http.client
-compat_http_server = http.server
-compat_struct_pack = struct.pack
-compat_struct_unpack = struct.unpack
-compat_urllib_error = urllib.error
-compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
+def __getattr__(name):
+    if name in ('WINDOWS_VT_MODE', 'windows_enable_vt_mode'):
+        from .. import utils
+        return getattr(utils, name)
+    raise AttributeError(name)
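
Note: the module-level __getattr__ added on the right-hand side relies on PEP 562, which lets a plain module intercept lookups of names it does not itself define, so WINDOWS_VT_MODE is fetched from utils only when first accessed and circular imports are avoided. A toy module showing the same pattern (this is a sketch, not yt-dlp code; the names are made up):

    # mymodule.py — minimal PEP 562 sketch
    _LAZY_NAMES = ('expensive_table',)

    def __getattr__(name):
        if name in _LAZY_NAMES:
            # Computed (or imported) lazily, only on first access
            return list(range(10))
        raise AttributeError(name)

After `import mymodule`, accessing `mymodule.expensive_table` triggers the hook, while unknown names still raise AttributeError as usual.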

View File

@@ -4,6 +4,7 @@ import importlib
 import sys
 import types
 _NO_ATTRIBUTE = object()
 _Package = collections.namedtuple('Package', ('name', 'version'))
@@ -30,7 +31,7 @@ def _is_package(module):
     return True
-def passthrough_module(parent, child, allowed_attributes=None, *, callback=lambda _: None):
+def passthrough_module(parent, child, *, callback=lambda _: None):
     parent_module = importlib.import_module(parent)
     child_module = None  # Import child module only as needed
@@ -40,30 +41,22 @@ def passthrough_module(parent, child, allowed_attributes=None, *, callback=lambda _: None):
             with contextlib.suppress(ImportError):
                 return importlib.import_module(f'.{attr}', parent)
-            ret = self.__from_child(attr)
-            if ret is _NO_ATTRIBUTE:
-                raise AttributeError(f'module {parent} has no attribute {attr}')
-            callback(attr)
-            return ret
-        def __from_child(self, attr):
-            if allowed_attributes is None:
-                if attr.startswith('__') and attr.endswith('__'):
-                    return _NO_ATTRIBUTE
-            elif attr not in allowed_attributes:
-                return _NO_ATTRIBUTE
             nonlocal child_module
             child_module = child_module or importlib.import_module(child, parent)
+            ret = _NO_ATTRIBUTE
             with contextlib.suppress(AttributeError):
-                return getattr(child_module, attr)
+                ret = getattr(child_module, attr)
             if _is_package(child_module):
                 with contextlib.suppress(ImportError):
-                    return importlib.import_module(f'.{attr}', child)
-            return _NO_ATTRIBUTE
+                    ret = importlib.import_module(f'.{attr}', child)
+            if ret is _NO_ATTRIBUTE:
+                raise AttributeError(f'module {parent} has no attribute {attr}')
+            callback(attr)
+            return ret
 # Python 3.6 does not have module level __getattr__
 # https://peps.python.org/pep-0562/
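
Note: at its core, the lookup order is the same in both versions of passthrough_module — try the child module's attribute first, then try importing the name as a submodule of the child package, and only then give up. A stripped-down sketch of that resolution chain (the helper name and arguments are hypothetical, for illustration only):

    import contextlib
    import importlib

    def resolve(child_module, child_name, attr):
        # 1. a regular attribute of the child module
        with contextlib.suppress(AttributeError):
            return getattr(child_module, attr)
        # 2. a submodule of the child package
        with contextlib.suppress(ImportError):
            return importlib.import_module(f'.{attr}', child_name)
        raise AttributeError(attr)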

View File

@@ -1,7 +1,5 @@
-import base64
 import contextlib
 import ctypes
-import http.cookiejar
 import json
 import os
 import shutil
@@ -19,6 +17,7 @@ from .aes import (
     aes_gcm_decrypt_and_verify_bytes,
     unpad_pkcs7,
 )
+from .compat import compat_b64decode, compat_cookiejar_Cookie
 from .dependencies import (
     _SECRETSTORAGE_UNAVAILABLE_REASON,
     secretstorage,
@@ -143,7 +142,7 @@ def _extract_firefox_cookies(profile, logger):
         total_cookie_count = len(table)
         for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
             progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
-            cookie = http.cookiejar.Cookie(
+            cookie = compat_cookiejar_Cookie(
                 version=0, name=name, value=value, port=None, port_specified=False,
                 domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
                 path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
@@ -298,7 +297,7 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
     if value is None:
         return is_encrypted, None
-    return is_encrypted, http.cookiejar.Cookie(
+    return is_encrypted, compat_cookiejar_Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
@@ -590,7 +589,7 @@ def _parse_safari_cookies_record(data, jar, logger):
     p.skip_to(record_size, 'space at the end of the record')
-    cookie = http.cookiejar.Cookie(
+    cookie = compat_cookiejar_Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
@@ -836,7 +835,7 @@ def _get_windows_v10_key(browser_root, logger):
     except KeyError:
         logger.error('no encrypted key in Local State')
         return None
-    encrypted_key = base64.b64decode(base64_key)
+    encrypted_key = compat_b64decode(base64_key)
     prefix = b'DPAPI'
     if not encrypted_key.startswith(prefix):
         logger.error('invalid key')

View File

@@ -6,7 +6,8 @@ import sys
 import time
 from .fragment import FragmentFD
-from ..compat import functools
+from ..compat import functools  # isort: split
+from ..compat import compat_setenv
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
@@ -402,8 +403,8 @@ class FFmpegFD(ExternalFD):
         # We could switch to the following code if we are able to detect version properly
         # args += ['-http_proxy', proxy]
         env = os.environ.copy()
-        env['HTTP_PROXY'] = proxy
-        env['http_proxy'] = proxy
+        compat_setenv('HTTP_PROXY', proxy, env=env)
+        compat_setenv('http_proxy', proxy, env=env)
     protocol = info_dict.get('protocol')
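
Note: both spellings of the assignment do the same thing — set the proxy variables on a copy of the environment, so only the spawned ffmpeg process sees them and the parent environment stays untouched. A minimal sketch (proxy address illustrative, and this assumes ffmpeg is on PATH):

    import os
    import subprocess

    env = os.environ.copy()
    # Both cases are set because some tools only honour the lowercase form.
    env['HTTP_PROXY'] = env['http_proxy'] = 'http://127.0.0.1:3128'
    subprocess.run(['ffmpeg', '-version'], env=env)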

View File

@@ -1,13 +1,17 @@
-import base64
 import io
 import itertools
-import struct
 import time
-import urllib.error
-import urllib.parse
 from .fragment import FragmentFD
-from ..compat import compat_etree_fromstring
+from ..compat import (
+    compat_b64decode,
+    compat_etree_fromstring,
+    compat_struct_pack,
+    compat_struct_unpack,
+    compat_urllib_error,
+    compat_urllib_parse_urlparse,
+    compat_urlparse,
+)
 from ..utils import fix_xml_ampersands, xpath_text
@@ -31,13 +35,13 @@ class FlvReader(io.BytesIO):
     # Utility functions for reading numbers and strings
     def read_unsigned_long_long(self):
-        return struct.unpack('!Q', self.read_bytes(8))[0]
+        return compat_struct_unpack('!Q', self.read_bytes(8))[0]
     def read_unsigned_int(self):
-        return struct.unpack('!I', self.read_bytes(4))[0]
+        return compat_struct_unpack('!I', self.read_bytes(4))[0]
     def read_unsigned_char(self):
-        return struct.unpack('!B', self.read_bytes(1))[0]
+        return compat_struct_unpack('!B', self.read_bytes(1))[0]
     def read_string(self):
         res = b''
@@ -199,11 +203,11 @@ def build_fragments_list(boot_info):
 def write_unsigned_int(stream, val):
-    stream.write(struct.pack('!I', val))
+    stream.write(compat_struct_pack('!I', val))
 def write_unsigned_int_24(stream, val):
-    stream.write(struct.pack('!I', val)[1:])
+    stream.write(compat_struct_pack('!I', val)[1:])
 def write_flv_header(stream):
@@ -297,12 +301,12 @@ class F4mFD(FragmentFD):
         # 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
         bootstrap_url = node.get('url')
         if bootstrap_url:
-            bootstrap_url = urllib.parse.urljoin(
+            bootstrap_url = compat_urlparse.urljoin(
                 base_url, bootstrap_url)
             boot_info = self._get_bootstrap_from_url(bootstrap_url)
         else:
             bootstrap_url = None
-            bootstrap = base64.b64decode(node.text)
+            bootstrap = compat_b64decode(node.text)
             boot_info = read_bootstrap_info(bootstrap)
         return boot_info, bootstrap_url
@@ -332,14 +336,14 @@ class F4mFD(FragmentFD):
         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
         man_base_url = get_base_url(doc) or man_url
-        base_url = urllib.parse.urljoin(man_base_url, media.attrib['url'])
+        base_url = compat_urlparse.urljoin(man_base_url, media.attrib['url'])
         bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
         boot_info, bootstrap_url = self._parse_bootstrap_node(
             bootstrap_node, man_base_url)
         live = boot_info['live']
         metadata_node = media.find(_add_ns('metadata'))
         if metadata_node is not None:
-            metadata = base64.b64decode(metadata_node.text)
+            metadata = compat_b64decode(metadata_node.text)
         else:
             metadata = None
@@ -367,7 +371,7 @@ class F4mFD(FragmentFD):
         if not live:
             write_metadata_tag(dest_stream, metadata)
-        base_url_parsed = urllib.parse.urlparse(base_url)
+        base_url_parsed = compat_urllib_parse_urlparse(base_url)
         self._start_frag_download(ctx, info_dict)
@@ -407,7 +411,7 @@ class F4mFD(FragmentFD):
                 if box_type == b'mdat':
                     self._append_fragment(ctx, box_data)
                     break
-            except urllib.error.HTTPError as err:
+            except compat_urllib_error.HTTPError as err:
                 if live and (err.code == 404 or err.code == 410):
                     # We didn't keep up with the live window. Continue
                     # with the next available fragment.
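
Note: whether spelled struct.unpack or compat_struct_unpack, the FlvReader helpers above all parse big-endian fields of the FLV/F4M binary layout. A tiny sketch of what the three format strings decode (the input bytes are fabricated):

    import struct

    # '!Q', '!I', '!B' read big-endian 8-, 4- and 1-byte unsigned ints,
    # matching read_unsigned_long_long / read_unsigned_int / read_unsigned_char.
    data = bytes.fromhex('00000000000000ff' '000000ff' 'ff')
    print(struct.unpack('!Q', data[:8])[0])    # 255
    print(struct.unpack('!I', data[8:12])[0])  # 255
    print(struct.unpack('!B', data[12:])[0])   # 255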

View File

@@ -4,14 +4,12 @@ import http.client
 import json
 import math
 import os
-import struct
 import time
-import urllib.error
 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_os_name
+from ..compat import compat_os_name, compat_struct_pack, compat_urllib_error
 from ..utils import (
     DownloadError,
     encodeFilename,
@@ -350,7 +348,7 @@ class FragmentFD(FileDownloader):
         decrypt_info = fragment.get('decrypt_info')
         if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
             return frag_content
-        iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
+        iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
         decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
         # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
         # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
@@ -459,7 +457,7 @@ class FragmentFD(FileDownloader):
                 if self._download_fragment(ctx, fragment['url'], info_dict, headers):
                     break
                 return
-            except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
+            except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
                 # Unavailable (possibly temporary) fragments may be served.
                 # First we try to retry then either skip or abort.
                 # See https://github.com/ytdl-org/youtube-dl/issues/10165,
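
Note: the '>8xq' pack above implements the HLS rule (RFC 8216 §5.2) that when EXT-X-KEY carries no IV, the AES-128 IV is the 16-byte big-endian media sequence number. A self-contained illustration:

    import struct

    # 8 zero pad bytes ('8x') followed by a signed 64-bit big-endian
    # integer ('q') yields the 16-byte default IV.
    media_sequence = 42
    iv = struct.pack('>8xq', media_sequence)
    print(len(iv))   # 16
    print(iv.hex())  # 0000000000000000000000000000002a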

View File

@@ -1,12 +1,12 @@
 import binascii
 import io
 import re
-import urllib.parse
 from . import get_suitable_downloader
 from .external import FFmpegFD
 from .fragment import FragmentFD
 from .. import webvtt
+from ..compat import compat_urlparse
 from ..dependencies import Cryptodome_AES
 from ..utils import bug_reports_message, parse_m3u8_attributes, update_url_query
@@ -140,7 +140,7 @@ class HlsFD(FragmentFD):
         extra_query = None
         extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
         if extra_param_to_segment_url:
-            extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+            extra_query = compat_urlparse.parse_qs(extra_param_to_segment_url)
         i = 0
         media_sequence = 0
         decrypt_info = {'METHOD': 'NONE'}
@@ -162,7 +162,7 @@ class HlsFD(FragmentFD):
                 frag_url = (
                     line
                     if re.match(r'^https?://', line)
-                    else urllib.parse.urljoin(man_url, line))
+                    else compat_urlparse.urljoin(man_url, line))
                 if extra_query:
                     frag_url = update_url_query(frag_url, extra_query)
@@ -187,7 +187,7 @@ class HlsFD(FragmentFD):
                 frag_url = (
                     map_info.get('URI')
                     if re.match(r'^https?://', map_info.get('URI'))
-                    else urllib.parse.urljoin(man_url, map_info.get('URI')))
+                    else compat_urlparse.urljoin(man_url, map_info.get('URI')))
                 if extra_query:
                     frag_url = update_url_query(frag_url, extra_query)
@@ -215,7 +215,7 @@ class HlsFD(FragmentFD):
                 if 'IV' in decrypt_info:
                     decrypt_info['IV'] = binascii.unhexlify(decrypt_info['IV'][2:].zfill(32))
                 if not re.match(r'^https?://', decrypt_info['URI']):
-                    decrypt_info['URI'] = urllib.parse.urljoin(
+                    decrypt_info['URI'] = compat_urlparse.urljoin(
                         man_url, decrypt_info['URI'])
                 if extra_query:
                     decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)

View File

@@ -1,12 +1,11 @@
-import http.client
 import os
 import random
 import socket
 import ssl
 import time
-import urllib.error
 from .common import FileDownloader
+from ..compat import compat_http_client, compat_urllib_error
 from ..utils import (
     ContentTooShortError,
     ThrottledDownload,
@@ -25,7 +24,7 @@ RESPONSE_READ_EXCEPTIONS = (
     socket.timeout,  # compat: py < 3.10
     ConnectionError,
     ssl.SSLError,
-    http.client.HTTPException
+    compat_http_client.HTTPException
 )
@@ -156,7 +155,7 @@ class HttpFD(FileDownloader):
                     ctx.resume_len = 0
                     ctx.open_mode = 'wb'
                     ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
-            except urllib.error.HTTPError as err:
+            except compat_urllib_error.HTTPError as err:
                 if err.code == 416:
                     # Unable to resume (requested range not satisfiable)
                     try:
@@ -164,7 +163,7 @@ class HttpFD(FileDownloader):
                         ctx.data = self.ydl.urlopen(
                             sanitized_Request(url, request_data, headers))
                         content_length = ctx.data.info()['Content-Length']
-                    except urllib.error.HTTPError as err:
+                    except compat_urllib_error.HTTPError as err:
                         if err.code < 500 or err.code >= 600:
                             raise
                         else:
@@ -197,7 +196,7 @@ class HttpFD(FileDownloader):
                     # Unexpected HTTP error
                     raise
                 raise RetryDownload(err)
-            except urllib.error.URLError as err:
+            except compat_urllib_error.URLError as err:
                 if isinstance(err.reason, ssl.CertificateError):
                     raise
                 raise RetryDownload(err)

View File

@@ -2,9 +2,9 @@ import binascii
 import io
 import struct
 import time
-import urllib.error
 from .fragment import FragmentFD
+from ..compat import compat_urllib_error
 u8 = struct.Struct('>B')
 u88 = struct.Struct('>Bx')
@@ -268,7 +268,7 @@ class IsmFD(FragmentFD):
                 extra_state['ism_track_written'] = True
             self._append_fragment(ctx, frag_content)
             break
-        except urllib.error.HTTPError as err:
+        except compat_urllib_error.HTTPError as err:
             count += 1
             if count <= fragment_retries:
                 self.report_retry_fragment(err, frag_index, count, fragment_retries)

View File

@@ -4,6 +4,7 @@ import subprocess
 import time
 from .common import FileDownloader
+from ..compat import compat_str
 from ..utils import (
     Popen,
     check_executable,
@@ -142,7 +143,7 @@ class RtmpFD(FileDownloader):
         if isinstance(conn, list):
             for entry in conn:
                 basic_args += ['--conn', entry]
-        elif isinstance(conn, str):
+        elif isinstance(conn, compat_str):
             basic_args += ['--conn', conn]
         if protocol is not None:
             basic_args += ['--protocol', protocol]

View File

@@ -1,8 +1,8 @@
 import json
 import time
-import urllib.error
 from .fragment import FragmentFD
+from ..compat import compat_urllib_error
 from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get
@@ -128,7 +128,7 @@ class YoutubeLiveChatFD(FragmentFD):
             elif info_dict['protocol'] == 'youtube_live_chat':
                 continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
             return True, continuation_id, offset, click_tracking_params
-        except urllib.error.HTTPError as err:
+        except compat_urllib_error.HTTPError as err:
             count += 1
             if count <= fragment_retries:
                 self.report_retry_fragment(err, frag_index, count, fragment_retries)

View File

@@ -7,13 +7,12 @@ import json
 import re
 import struct
 import time
-import urllib.parse
-import urllib.request
 import urllib.response
 import uuid
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
+from ..compat import compat_urllib_parse_urlparse, compat_urllib_request
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
@@ -34,7 +33,7 @@ def add_opener(ydl, handler):
     ''' Add a handler for opening URLs, like _download_webpage '''
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
-    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
+    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
    ydl._opener.add_handler(handler)
@@ -47,7 +46,7 @@ def remove_opener(ydl, handler):
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
     opener = ydl._opener
-    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
+    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
     if isinstance(handler, (type, tuple)):
         find_cp = lambda x: isinstance(x, handler)
     else:
@@ -97,7 +96,7 @@ def remove_opener(ydl, handler):
     opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
-class AbemaLicenseHandler(urllib.request.BaseHandler):
+class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
     handler_order = 499
     STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
     HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
@@ -137,7 +136,7 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
     def abematv_license_open(self, url):
         url = request_to_url(url)
-        ticket = urllib.parse.urlparse(url).netloc
+        ticket = compat_urllib_parse_urlparse(url).netloc
         response_data = self._get_videokey_from_ticket(ticket)
         return urllib.response.addinfourl(io.BytesIO(response_data), headers={
             'Content-Length': len(response_data),
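
Note: AbemaLicenseHandler works because urllib dispatches a URL of scheme X to a handler method named X_open() — here, abematv-license URLs reach abematv_license_open(). A toy handler demonstrating the same mechanism (scheme and class name are made up for illustration):

    import io
    import urllib.request
    import urllib.response

    class EchoHandler(urllib.request.BaseHandler):
        # A scheme named "echo" is served by a method called echo_open().
        def echo_open(self, req):
            body = req.get_full_url().encode()
            return urllib.response.addinfourl(
                io.BytesIO(body), headers={}, url=req.get_full_url())

    opener = urllib.request.build_opener(EchoHandler())
    print(opener.open('echo://hello').read())  # b'echo://hello'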

View File

@@ -1,4 +1,3 @@
-import getpass
 import json
 import re
 import time
@@ -6,14 +5,18 @@ import urllib.error
 import xml.etree.ElementTree as etree
 from .common import InfoExtractor
-from ..compat import compat_urlparse
-from ..utils import (
-    NO_DEFAULT,
-    ExtractorError,
-    unescapeHTML,
-    unified_timestamp,
-    urlencode_postdata,
+from ..compat import (
+    compat_urlparse,
+    compat_getpass
 )
+from ..utils import (
+    unescapeHTML,
+    urlencode_postdata,
+    unified_timestamp,
+    ExtractorError,
+    NO_DEFAULT,
+)
 MSO_INFO = {
     'DTV': {
@@ -1503,7 +1506,7 @@ class AdobePassIE(InfoExtractor):
                 'send_confirm_link': False,
                 'send_token': True
             }))
-        philo_code = getpass.getpass('Type auth code you have received [Return]: ')
+        philo_code = compat_getpass('Type auth code you have received [Return]: ')
         self._download_webpage(
             'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
                 'token': philo_code

View File

@@ -1,34 +1,36 @@
-import json
 import re
-import urllib.parse
+import json
 from .common import InfoExtractor
-from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
-from ..compat import compat_HTTPError, compat_urllib_parse_unquote
+from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
+from ..compat import (
+    compat_urllib_parse_unquote,
+    compat_urllib_parse_unquote_plus,
+    compat_HTTPError
+)
 from ..utils import (
-    KNOWN_EXTENSIONS,
-    ExtractorError,
-    HEADRequest,
     bug_reports_message,
     clean_html,
     dict_get,
     extract_attributes,
+    ExtractorError,
     get_element_by_id,
+    HEADRequest,
     int_or_none,
     join_nonempty,
+    KNOWN_EXTENSIONS,
     merge_dicts,
     mimetype2ext,
     orderedSet,
     parse_duration,
     parse_qs,
-    str_or_none,
     str_to_int,
+    str_or_none,
     traverse_obj,
     try_get,
     unified_strdate,
     unified_timestamp,
-    url_or_none,
     urlhandle_detect_ext,
+    url_or_none
 )
@@ -141,7 +143,7 @@ class ArchiveOrgIE(InfoExtractor):
         return json.loads(extract_attributes(element)['value'])
     def _real_extract(self, url):
-        video_id = urllib.parse.unquote_plus(self._match_id(url))
+        video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
         identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
         # Archive.org metadata API doesn't clearly demarcate playlist entries

View File

@@ -1,8 +1,8 @@
 import random
 from .common import InfoExtractor
-from ..compat import compat_str, compat_urllib_parse_unquote
-from ..utils import ExtractorError, str_or_none, try_get
+from ..utils import ExtractorError, try_get, compat_str, str_or_none
+from ..compat import compat_urllib_parse_unquote
 class AudiusBaseIE(InfoExtractor):

View File

@@ -1,12 +1,16 @@
+import xml.etree.ElementTree
 import functools
 import itertools
 import json
 import re
-import urllib.error
-import xml.etree.ElementTree
 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str, compat_urlparse
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+    compat_urllib_error,
+    compat_urlparse,
+)
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -387,7 +391,7 @@ class BBCCoUkIE(InfoExtractor):
                 href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                 m3u8_id=format_id, fatal=False)
         except ExtractorError as e:
-            if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
+            if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
                     and e.exc_info[1].code in (403, 404)):
                 raise
             fmts = []

View File

@@ -1,9 +1,13 @@
 import codecs
-import json
 import re
+import json
 from .common import InfoExtractor
-from ..compat import compat_ord, compat_urllib_parse_unquote
+from ..compat import (
+    compat_chr,
+    compat_ord,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -12,8 +16,8 @@ from ..utils import (
     multipart_encode,
     parse_duration,
     random_birthday,
-    try_get,
     urljoin,
+    try_get,
 )
@@ -140,7 +144,7 @@ class CDAIE(InfoExtractor):
         b = []
         for c in a:
             f = compat_ord(c)
-            b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f))
+            b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f <= 126 else compat_chr(f))
         a = ''.join(b)
         a = a.replace('.cda.mp4', '')
         for p in ('.2cda.pl', '.3cda.pl'):

View File

@@ -1,11 +1,11 @@
 import itertools
 import json
-import urllib.parse
 from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    ExtractorError,
     clean_html,
+    ExtractorError,
     int_or_none,
     str_to_int,
     url_or_none,
@@ -47,8 +47,8 @@ class ChingariBaseIE(InfoExtractor):
             'id': id,
             'extractor_key': ChingariIE.ie_key(),
             'extractor': 'Chingari',
-            'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
-            'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
+            'title': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
+            'description': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
             'duration': media_data.get('duration'),
             'thumbnail': url_or_none(thumbnail),
             'like_count': post_data.get('likeCount'),

View File

@@ -1,10 +1,6 @@
 import base64
 import collections
-import getpass
 import hashlib
-import http.client
-import http.cookiejar
-import http.cookies
 import itertools
 import json
 import math
@@ -13,12 +9,24 @@ import os
 import random
 import sys
 import time
-import urllib.parse
-import urllib.request
 import xml.etree.ElementTree
 from ..compat import functools, re  # isort: split
-from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
+from ..compat import (
+    compat_cookiejar_Cookie,
+    compat_cookies_SimpleCookie,
+    compat_etree_fromstring,
+    compat_expanduser,
+    compat_getpass,
+    compat_http_client,
+    compat_os_name,
+    compat_str,
+    compat_urllib_error,
+    compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
+    compat_urllib_request,
+    compat_urlparse,
+)
 from ..downloader import FileDownloader
 from ..downloader.f4m import get_base_url, remove_encrypted_media
 from ..utils import (
@@ -663,7 +671,7 @@ class InfoExtractor:
             if hasattr(e, 'countries'):
                 kwargs['countries'] = e.countries
             raise type(e)(e.orig_msg, **kwargs)
-        except http.client.IncompleteRead as e:
+        except compat_http_client.IncompleteRead as e:
             raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
         except (KeyError, StopIteration) as e:
             raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
@@ -722,7 +730,7 @@ class InfoExtractor:
     @staticmethod
     def __can_accept_status_code(err, expected_status):
-        assert isinstance(err, urllib.error.HTTPError)
+        assert isinstance(err, compat_urllib_error.HTTPError)
         if expected_status is None:
             return False
         elif callable(expected_status):
@@ -731,7 +739,7 @@ class InfoExtractor:
             return err.code in variadic(expected_status)
     def _create_request(self, url_or_request, data=None, headers={}, query={}):
-        if isinstance(url_or_request, urllib.request.Request):
+        if isinstance(url_or_request, compat_urllib_request.Request):
             return update_Request(url_or_request, data=data, headers=headers, query=query)
         if query:
             url_or_request = update_url_query(url_or_request, query)
@@ -771,7 +779,7 @@ class InfoExtractor:
         try:
             return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
         except network_exceptions as err:
-            if isinstance(err, urllib.error.HTTPError):
+            if isinstance(err, compat_urllib_error.HTTPError):
                 if self.__can_accept_status_code(err, expected_status):
                     # Retain reference to error to prevent file object from
                     # being closed before it can be read. Works around the
@@ -799,7 +807,7 @@ class InfoExtractor:
         Arguments:
         url_or_request -- plain text URL as a string or
-            a urllib.request.Request object
+            a compat_urllib_request.Request object
         video_id -- Video/playlist/item identifier (string)
         Keyword arguments:
@@ -827,7 +835,7 @@ class InfoExtractor:
         """
         # Strip hashes from the URL (#1038)
-        if isinstance(url_or_request, str):
+        if isinstance(url_or_request, (compat_str, str)):
             url_or_request = url_or_request.partition('#')[0]
         urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
@@ -1048,7 +1056,7 @@ class InfoExtractor:
         while True:
             try:
                 return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
-            except http.client.IncompleteRead as e:
+            except compat_http_client.IncompleteRead as e:
                 try_count += 1
                 if try_count >= tries:
                     raise e
@@ -1284,7 +1292,7 @@ class InfoExtractor:
         if tfa is not None:
             return tfa
-        return getpass.getpass('Type %s and press [Return]: ' % note)
+        return compat_getpass('Type %s and press [Return]: ' % note)
     # Helper functions for extracting OpenGraph info
     @staticmethod
@@ -1420,7 +1428,7 @@ class InfoExtractor:
         return {}
     def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
-        if isinstance(json_ld, str):
+        if isinstance(json_ld, compat_str):
             json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
         if not json_ld:
             return {}
@@ -1510,7 +1518,7 @@ class InfoExtractor:
                 # both types can have 'name' property(inherited from 'Thing' type). [1]
                 # however some websites are using 'Text' type instead.
                 # 1. https://schema.org/VideoObject
-                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, str) else None,
+                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
                 'filesize': int_or_none(float_or_none(e.get('contentSize'))),
                 'tbr': int_or_none(e.get('bitrate')),
                 'width': int_or_none(e.get('width')),
@@ -2159,7 +2167,7 @@ class InfoExtractor:
             ]), m3u8_doc)
         def format_url(url):
-            return url if re.match(r'^https?://', url) else urllib.parse.urljoin(m3u8_url, url)
+            return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url)
         if self.get_param('hls_split_discontinuity', False):
             def _extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None):
@@ -2532,7 +2540,7 @@ class InfoExtractor:
                     })
                     continue
-                src_url = src if src.startswith('http') else urllib.parse.urljoin(base, src)
+                src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
                 src_url = src_url.strip()
                 if proto == 'm3u8' or src_ext == 'm3u8':
@@ -2555,7 +2563,7 @@ class InfoExtractor:
                         'plugin': 'flowplayer-3.2.0.1',
                     }
                     f4m_url += '&' if '?' in f4m_url else '?'
-                    f4m_url += urllib.parse.urlencode(f4m_params)
+                    f4m_url += compat_urllib_parse_urlencode(f4m_params)
                     formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
                 elif src_ext == 'mpd':
                     formats.extend(self._extract_mpd_formats(
@@ -2825,7 +2833,7 @@ class InfoExtractor:
                 if re.match(r'^https?://', base_url):
                     break
             if mpd_base_url and base_url.startswith('/'):
-                base_url = urllib.parse.urljoin(mpd_base_url, base_url)
+                base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
             elif mpd_base_url and not re.match(r'^https?://', base_url):
                 if not mpd_base_url.endswith('/'):
                     mpd_base_url += '/'
@@ -3095,7 +3103,7 @@ class InfoExtractor:
             sampling_rate = int_or_none(track.get('SamplingRate'))
             track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
-            track_url_pattern = urllib.parse.urljoin(ism_url, track_url_pattern)
+            track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
             fragments = []
             fragment_ctx = {
@@ -3114,7 +3122,7 @@ class InfoExtractor:
                     fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                 for _ in range(fragment_repeat):
                     fragments.append({
-                        'url': re.sub(r'{start[ _]time}', str(fragment_ctx['time']), track_url_pattern),
+                        'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
                         'duration': fragment_ctx['duration'] / stream_timescale,
                     })
                     fragment_ctx['time'] += fragment_ctx['duration']
@@ -3358,7 +3366,7 @@ class InfoExtractor:
         return formats, subtitles
     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
-        query = urllib.parse.urlparse(url).query
+        query = compat_urlparse.urlparse(url).query
         url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
         mobj = re.search(
             r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
@@ -3464,7 +3472,7 @@ class InfoExtractor:
             if not isinstance(track, dict):
                 continue
             track_kind = track.get('kind')
-            if not track_kind or not isinstance(track_kind, str):
+            if not track_kind or not isinstance(track_kind, compat_str):
                 continue
             if track_kind.lower() not in ('captions', 'subtitles'):
                 continue
@@ -3537,7 +3545,7 @@ class InfoExtractor:
                 # Often no height is provided but there is a label in
                 # format like "1080p", "720p SD", or 1080.
                 height = int_or_none(self._search_regex(
-                    r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
+                    r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
                     'height', default=None))
                 a_format = {
                     'url': source_url,
@@ -3589,15 +3597,15 @@ class InfoExtractor:
     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                     path='/', secure=False, discard=False, rest={}, **kwargs):
-        cookie = http.cookiejar.Cookie(
+        cookie = compat_cookiejar_Cookie(
             0, name, value, port, port is not None, domain, True,
             domain.startswith('.'), path, True, secure, expire_time,
             discard, None, None, rest)
         self.cookiejar.set_cookie(cookie)
     def _get_cookies(self, url):
-        """ Return a http.cookies.SimpleCookie with the cookies for the url """
-        return http.cookies.SimpleCookie(self._downloader._calc_cookies(url))
+        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
+        return compat_cookies_SimpleCookie(self._downloader._calc_cookies(url))
     def _apply_first_set_cookie_header(self, url_handle, cookie):
         """
@@ -3763,10 +3771,10 @@ class InfoExtractor:
         return headers
     def _generic_id(self, url):
-        return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
+        return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
     def _generic_title(self, url):
-        return urllib.parse.unquote(os.path.splitext(url_basename(url))[0])
+        return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
     @staticmethod
     def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
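
Note: both spellings of the Cookie constructor in _set_cookie take the same long positional argument list from the standard library. A self-contained sketch of building a cookie that way and storing it in a jar (the values are illustrative):

    import http.cookiejar

    # Same positional layout as _set_cookie() above:
    # version, name, value, port, port_specified, domain, domain_specified,
    # domain_initial_dot, path, path_specified, secure, expires, discard,
    # comment, comment_url, rest
    cookie = http.cookiejar.Cookie(
        0, 'session', 'abc123', None, False, 'example.com', True,
        False, '/', True, False, None,
        False, None, None, {})
    jar = http.cookiejar.CookieJar()
    jar.set_cookie(cookie)
    print([f'{c.name}={c.value}' for c in jar])  # ['session=abc123']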

View File

@@ -1,6 +1,5 @@
-import urllib.parse
 from .common import InfoExtractor
+from ..compat import compat_urlparse
 class RtmpIE(InfoExtractor):
@@ -24,7 +23,7 @@ class RtmpIE(InfoExtractor):
             'formats': [{
                 'url': url,
                 'ext': 'flv',
-                'format_id': urllib.parse.urlparse(url).scheme,
+                'format_id': compat_urlparse.urlparse(url).scheme,
             }],
         }

View File

@@ -1,20 +1,19 @@
 import base64
-import json
 import re
-import urllib.request
-import xml.etree.ElementTree
+import json
 import zlib
-from hashlib import sha1
-from math import floor, pow, sqrt
+import xml.etree.ElementTree
+from hashlib import sha1
+from math import pow, sqrt, floor
 from .common import InfoExtractor
 from .vrv import VRVBaseIE
-from ..aes import aes_cbc_decrypt
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
+    compat_urllib_request,
     compat_urlparse,
 )
 from ..utils import (
@@ -23,8 +22,8 @@ from ..utils import (
     extract_attributes,
     float_or_none,
     format_field,
-    int_or_none,
     intlist_to_bytes,
+    int_or_none,
     join_nonempty,
     lowercase_escape,
     merge_dicts,
@@ -35,6 +34,9 @@ from ..utils import (
     try_get,
     xpath_text,
 )
+from ..aes import (
+    aes_cbc_decrypt,
+)
 class CrunchyrollBaseIE(InfoExtractor):
@@ -257,7 +259,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
         }
     def _download_webpage(self, url_or_request, *args, **kwargs):
-        request = (url_or_request if isinstance(url_or_request, urllib.request.Request)
+        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
                    else sanitized_Request(url_or_request))
         # Accept-Language must be set explicitly to accept any language to avoid issues
         # similar to https://github.com/ytdl-org/youtube-dl/issues/6797.

View File

@@ -1,8 +1,12 @@
 import re
 from .common import InfoExtractor
-from ..compat import compat_str
-from ..utils import ExtractorError, int_or_none, urlencode_postdata
+from ..utils import (
+    int_or_none,
+    urlencode_postdata,
+    compat_str,
+    ExtractorError,
+)
 class CuriosityStreamBaseIE(InfoExtractor):
@@ -46,7 +50,7 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
     IE_NAME = 'curiositystream'
     _VALID_URL = r'https?://(?:app\.)?curiositystream\.com/video/(?P<id>\d+)'
     _TESTS = [{
-        'url': 'http://app.curiositystream.com/video/2',
+        'url': 'https://app.curiositystream.com/video/2',
         'info_dict': {
             'id': '2',
             'ext': 'mp4',

View File

@@ -1,10 +1,10 @@
 import base64
 import json
 import re
-import urllib.parse
+import urllib
-from .adobepass import AdobePassIE
 from .common import InfoExtractor
+from .adobepass import AdobePassIE
 from .once import OnceIE
 from ..utils import (
     determine_ext,
@@ -197,7 +197,7 @@ class ESPNArticleIE(InfoExtractor):
     @classmethod
     def suitable(cls, url):
-        return False if (ESPNIE.suitable(url) or WatchESPNIE.suitable(url)) else super().suitable(url)
+        return False if (ESPNIE.suitable(url) or WatchESPNIE.suitable(url)) else super(ESPNArticleIE, cls).suitable(url)
     def _real_extract(self, url):
         video_id = self._match_id(url)

View File

@@ -1,18 +1,18 @@
 import json
 import re
-import urllib.parse
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_unquote_plus,
 )
 from ..utils import (
-    ExtractorError,
     clean_html,
     determine_ext,
     error_to_compat_str,
+    ExtractorError,
     float_or_none,
     get_element_by_id,
     get_first,
@@ -467,7 +467,7 @@ class FacebookIE(InfoExtractor):
         dash_manifest = video.get('dash_manifest')
         if dash_manifest:
             formats.extend(self._parse_mpd_formats(
-                compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
+                compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
         def process_formats(formats):
             # Downloads with browser's User-Agent are rate limited. Working around

View File

@@ -1,6 +1,5 @@
 import os
 import re
-import urllib.parse
 import xml.etree.ElementTree

 from .ant1newsgr import Ant1NewsGrEmbedIE
@@ -107,7 +106,12 @@ from .yapfiles import YapFilesIE
 from .youporn import YouPornIE
 from .youtube import YoutubeIE
 from .zype import ZypeIE
-from ..compat import compat_etree_fromstring
+from ..compat import (
+    compat_etree_fromstring,
+    compat_str,
+    compat_urllib_parse_unquote,
+    compat_urlparse,
+)
 from ..utils import (
     KNOWN_EXTENSIONS,
     ExtractorError,
@@ -2699,7 +2703,7 @@ class GenericIE(InfoExtractor):
         title = self._html_search_meta('DC.title', webpage, fatal=True)

-        camtasia_url = urllib.parse.urljoin(url, camtasia_cfg)
+        camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
         camtasia_cfg = self._download_xml(
             camtasia_url, video_id,
             note='Downloading camtasia configuration',
@@ -2715,7 +2719,7 @@ class GenericIE(InfoExtractor):
             entries.append({
                 'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
                 'title': f'{title} - {n.tag}',
-                'url': urllib.parse.urljoin(url, url_n.text),
+                'url': compat_urlparse.urljoin(url, url_n.text),
                 'duration': float_or_none(n.find('./duration').text),
             })
@@ -2767,7 +2771,7 @@ class GenericIE(InfoExtractor):
         if url.startswith('//'):
             return self.url_result(self.http_scheme() + url)

-        parsed_url = urllib.parse.urlparse(url)
+        parsed_url = compat_urlparse.urlparse(url)
         if not parsed_url.scheme:
             default_search = self.get_param('default_search')
             if default_search is None:
@@ -2843,7 +2847,7 @@ class GenericIE(InfoExtractor):
         m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
         if m:
             self.report_detected('direct video link')
-            format_id = str(m.group('format_id'))
+            format_id = compat_str(m.group('format_id'))
             subtitles = {}
             if format_id.endswith('mpegurl'):
                 formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
@@ -2962,7 +2966,7 @@ class GenericIE(InfoExtractor):
         # Unescaping the whole page allows to handle those cases in a generic way
         # FIXME: unescaping the whole page may break URLs, commenting out for now.
         # There probably should be a second run of generic extractor on unescaped webpage.
-        # webpage = urllib.parse.unquote(webpage)
+        # webpage = compat_urllib_parse_unquote(webpage)

         # Unescape squarespace embeds to be detected by generic extractor,
         # see https://github.com/ytdl-org/youtube-dl/issues/21294
@@ -3235,7 +3239,7 @@ class GenericIE(InfoExtractor):
             return self.url_result(mobj.group('url'))

         mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
         if mobj is not None:
-            return self.url_result(urllib.parse.unquote(mobj.group('url')))
+            return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))

         # Look for funnyordie embed
         matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
@@ -3488,7 +3492,7 @@ class GenericIE(InfoExtractor):
             r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
         if mobj is not None:
             return self.url_result(
-                urllib.parse.urljoin(url, mobj.group('url')), 'UDNEmbed')
+                compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')

         # Look for Senate ISVP iframe
         senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
@@ -3721,7 +3725,7 @@ class GenericIE(InfoExtractor):
         if mediasite_urls:
             entries = [
                 self.url_result(smuggle_url(
-                    urllib.parse.urljoin(url, mediasite_url),
+                    compat_urlparse.urljoin(url, mediasite_url),
                     {'UrlReferrer': url}), ie=MediasiteIE.ie_key())
                 for mediasite_url in mediasite_urls]
             return self.playlist_result(entries, video_id, video_title)
@@ -3916,11 +3920,11 @@ class GenericIE(InfoExtractor):
         subtitles = {}
         for source in sources:
             src = source.get('src')
-            if not src or not isinstance(src, str):
+            if not src or not isinstance(src, compat_str):
                 continue
-            src = urllib.parse.urljoin(url, src)
+            src = compat_urlparse.urljoin(url, src)
             src_type = source.get('type')
-            if isinstance(src_type, str):
+            if isinstance(src_type, compat_str):
                 src_type = src_type.lower()
             ext = determine_ext(src).lower()
             if src_type == 'video/youtube':
@@ -3954,7 +3958,7 @@ class GenericIE(InfoExtractor):
             if not src:
                 continue
             subtitles.setdefault(dict_get(sub, ('language', 'srclang')) or 'und', []).append({
-                'url': urllib.parse.urljoin(url, src),
+                'url': compat_urlparse.urljoin(url, src),
                 'name': sub.get('label'),
                 'http_headers': {
                     'Referer': full_response.geturl(),
@@ -3981,7 +3985,7 @@ class GenericIE(InfoExtractor):
                 return True
             if RtmpIE.suitable(vurl):
                 return True
-            vpath = urllib.parse.urlparse(vurl).path
+            vpath = compat_urlparse.urlparse(vurl).path
             vext = determine_ext(vpath, None)
             return vext not in (None, 'swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
@@ -4109,7 +4113,7 @@ class GenericIE(InfoExtractor):
         if refresh_header:
             found = re.search(REDIRECT_REGEX, refresh_header)
             if found:
-                new_url = urllib.parse.urljoin(url, unescapeHTML(found.group(1)))
+                new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
                 if new_url != url:
                     self.report_following_redirect(new_url)
                     return {
@@ -4135,8 +4139,8 @@ class GenericIE(InfoExtractor):
         for video_url in orderedSet(found):
             video_url = unescapeHTML(video_url)
             video_url = video_url.replace('\\/', '/')
-            video_url = urllib.parse.urljoin(url, video_url)
-            video_id = urllib.parse.unquote(os.path.basename(video_url))
+            video_url = compat_urlparse.urljoin(url, video_url)
+            video_id = compat_urllib_parse_unquote(os.path.basename(video_url))

             # Sometimes, jwplayer extraction will result in a YouTube URL
             if YoutubeIE.suitable(video_url):

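Every generic.py hunk above swaps urllib.parse for its compat_urlparse alias; under Python 3 they are the same module, so behaviour is unchanged. For reference, how urljoin resolves the relative, absolute-path and protocol-relative references this extractor deals with (the example.com URLs are illustrative):

    import urllib.parse

    base = 'https://example.com/media/page.html'
    assert urllib.parse.urljoin(base, 'video.mp4') == 'https://example.com/media/video.mp4'
    assert urllib.parse.urljoin(base, '/v/1.mp4') == 'https://example.com/v/1.mp4'
    assert urllib.parse.urljoin(base, '//cdn.example.com/v.mp4') == 'https://cdn.example.com/v.mp4'
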
View File

@@ -1,8 +1,13 @@
 import itertools

 from .common import InfoExtractor
-from ..compat import compat_str
-from ..utils import parse_duration, parse_iso8601, qualities, str_to_int
+from ..utils import (
+    qualities,
+    compat_str,
+    parse_duration,
+    parse_iso8601,
+    str_to_int,
+)


 class GigaIE(InfoExtractor):

View File

@@ -1,13 +1,13 @@
 import re

 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     clean_html,
-    determine_ext,
+    parse_iso8601,
     float_or_none,
     int_or_none,
-    parse_iso8601,
+    compat_str,
+    determine_ext,
 )

View File

@@ -1,16 +1,16 @@
 import itertools
 import re
-import urllib.parse
+import urllib

 from .common import InfoExtractor
 from ..utils import (
     int_or_none,
     mimetype2ext,
     remove_end,
-    strip_or_none,
-    unified_strdate,
     url_or_none,
     urljoin,
+    unified_strdate,
+    strip_or_none,
 )

View File

@@ -1,10 +1,10 @@
 import random
-import urllib.parse

 from .common import InfoExtractor
+from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    float_or_none,
     int_or_none,
+    float_or_none,
     timeconvert,
     update_url_query,
     xpath_text,
@@ -66,7 +66,7 @@ class KUSIIE(InfoExtractor):
         formats = []
         for quality in quality_options:
             formats.append({
-                'url': urllib.parse.unquote_plus(quality.attrib['url']),
+                'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
                 'height': int_or_none(quality.attrib.get('height')),
                 'width': int_or_none(quality.attrib.get('width')),
                 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),

View File

@@ -1,7 +1,7 @@
 from .common import InfoExtractor
-from ..compat import compat_str
 from ..utils import (
     clean_html,
+    compat_str,
     format_field,
     int_or_none,
     parse_iso8601,

View File

@@ -1,14 +1,17 @@
 import json
 import re
-import urllib.parse

 from .common import InfoExtractor
-from ..compat import compat_parse_qs, compat_urllib_parse_unquote
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_parse,
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
-    ExtractorError,
     determine_ext,
-    get_element_by_attribute,
+    ExtractorError,
     int_or_none,
+    get_element_by_attribute,
     mimetype2ext,
 )
@@ -140,7 +143,7 @@ class MetacafeIE(InfoExtractor):
         headers = {
             # Disable family filter
-            'Cookie': 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
+            'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False}))
         }

         # AnyClip videos require the flashversion cookie so that we get the link

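The Metacafe hunk touches the line that disables the site's family filter by smuggling a JSON blob through the 'user' cookie. A sketch of what that header value ends up looking like:

    import json
    import urllib.parse

    # quote() percent-encodes the JSON so braces, quotes and spaces are cookie-safe.
    cookie = 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
    assert cookie == 'user=%7B%22ffilter%22%3A%20false%7D; '
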
View File

@@ -3,6 +3,7 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_b64decode,
+    compat_chr,
     compat_ord,
     compat_str,
     compat_urllib_parse_unquote,
@@ -71,7 +72,7 @@ class MixcloudIE(MixcloudBaseIE):
     def _decrypt_xor_cipher(key, ciphertext):
         """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR."""
         return ''.join([
-            chr(compat_ord(ch) ^ compat_ord(k))
+            compat_chr(compat_ord(ch) ^ compat_ord(k))
             for ch, k in zip(ciphertext, itertools.cycle(key))])

     def _real_extract(self, url):

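The _decrypt_xor_cipher hunk is a one-name rename (chr to compat_chr). The routine itself, as a standalone sketch: XOR with a cycled key is an involution, so a single function serves for both encryption and decryption.

    import itertools


    def xor_cipher(key, text):
        # Repeat the key with itertools.cycle and XOR it into the input
        # character by character.
        return ''.join(
            chr(ord(ch) ^ ord(k))
            for ch, k in zip(text, itertools.cycle(key)))


    # Applying the cipher twice with the same key round-trips the input:
    assert xor_cipher('key', xor_cipher('key', '/stream/url')) == '/stream/url'
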
View File

@@ -1,7 +1,13 @@
-import urllib.parse

 from .common import InfoExtractor
-from ..utils import parse_duration, remove_end, unified_strdate, urljoin
+from ..compat import (
+    compat_urllib_parse_unquote_plus
+)
+from ..utils import (
+    parse_duration,
+    remove_end,
+    unified_strdate,
+    urljoin
+)


 class NDTVIE(InfoExtractor):
@@ -74,7 +80,7 @@ class NDTVIE(InfoExtractor):
         webpage = self._download_webpage(url, video_id)

         # '__title' does not contain extra words such as sub-site name, "Video" etc.
-        title = urllib.parse.unquote_plus(
+        title = compat_urllib_parse_unquote_plus(
             self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None)
             or self._og_search_title(webpage))

View File

@@ -1,11 +1,14 @@
 import itertools
 import json
 import time
-import urllib.error
-import urllib.parse
+import urllib

+from ..utils import (
+    ExtractorError,
+    parse_iso8601,
+    try_get,
+)
 from .common import InfoExtractor
-from ..utils import ExtractorError, parse_iso8601, try_get


 class NebulaBaseIE(InfoExtractor):

View File

@@ -1,12 +1,18 @@
-import itertools
-import re
+from hashlib import md5
 from base64 import b64encode
 from datetime import datetime
-from hashlib import md5
+import re

 from .common import InfoExtractor
-from ..compat import compat_str, compat_urllib_parse_urlencode
-from ..utils import float_or_none, sanitized_Request
+from ..compat import (
+    compat_urllib_parse_urlencode,
+    compat_str,
+    compat_itertools_count,
+)
+from ..utils import (
+    sanitized_Request,
+    float_or_none,
+)


 class NetEaseMusicBaseIE(InfoExtractor):
@@ -443,7 +449,7 @@ class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE):
         name = None
         desc = None
         entries = []
-        for offset in itertools.count(start=0, step=self._PAGE_SIZE):
+        for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE):
             info = self.query_api(
                 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d'
                 % (self._PAGE_SIZE, dj_id, offset),

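compat_itertools_count papers over ancient Python 2 releases whose itertools.count lacked the start/step arguments; on Python 3 it aliases plain itertools.count. A sketch of the offset-pagination loop the hunk touches, with query_api standing in for the extractor's JSON helper and a 'programs'/'more' response shape assumed from the surrounding code:

    import itertools

    PAGE_SIZE = 100


    def fetch_all_programs(query_api, dj_id):
        entries = []
        # Unbounded offset counter; the API's 'more' flag terminates the loop.
        for offset in itertools.count(start=0, step=PAGE_SIZE):
            info = query_api(
                'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d'
                % (PAGE_SIZE, dj_id, offset))
            entries.extend(info.get('programs') or [])
            if not info.get('more'):
                break
        return entries
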
View File

@@ -3,17 +3,18 @@ import random
 import re

 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str
+from ..compat import compat_str
 from ..utils import (
-    ExtractorError,
+    compat_HTTPError,
     determine_ext,
+    ExtractorError,
     int_or_none,
     parse_duration,
     parse_iso8601,
     str_or_none,
     try_get,
-    url_or_none,
     urljoin,
+    url_or_none,
 )

View File

@@ -1,9 +1,11 @@
 import json
 import re
-import urllib.parse

 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..compat import (
+    compat_HTTPError,
+    compat_urllib_parse,
+)
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -123,7 +125,7 @@ class PelotonIE(InfoExtractor):
         is_live = False
         if ride_data.get('content_format') == 'audio':
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), urllib.parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), compat_urllib_parse.quote(token))
             formats = [{
                 'url': url,
                 'ext': 'm4a',
@@ -136,9 +138,9 @@ class PelotonIE(InfoExtractor):
             url = 'https://members.onepeloton.com/.netlify/functions/m3u8-proxy?displayLanguage=en&acceptedSubtitles=%s&url=%s?hdnea=%s' % (
                 ','.join([re.sub('^([a-z]+)-([A-Z]+)$', r'\1', caption) for caption in ride_data['captions']]),
                 ride_data['vod_stream_url'],
-                urllib.parse.quote(urllib.parse.quote(token)))
+                compat_urllib_parse.quote(compat_urllib_parse.quote(token)))
         elif ride_data.get('live_stream_url'):
-            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), urllib.parse.quote(token))
+            url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), compat_urllib_parse.quote(token))
             is_live = True
         else:
             raise ExtractorError('Missing video URL')

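The Peloton token is quoted twice in the m3u8-proxy branch because the signed stream URL is itself embedded as a query parameter of another URL: each hop strips one layer of percent-encoding. An illustration with a hypothetical token:

    import urllib.parse

    token = 'abc+def/ghi'                      # hypothetical hdnea token
    once = urllib.parse.quote(token)           # 'abc%2Bdef/ghi'
    twice = urllib.parse.quote(once)           # 'abc%252Bdef/ghi'
    assert urllib.parse.unquote(urllib.parse.unquote(twice)) == token
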
View File

@@ -1,9 +1,14 @@
 import re
-import urllib.parse

 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote
-from ..utils import ExtractorError, clean_html
+from ..compat import (
+    compat_urllib_parse_unquote,
+    compat_urllib_parse_unquote_plus,
+)
+from ..utils import (
+    clean_html,
+    ExtractorError,
+)


 class PlayvidIE(InfoExtractor):
@@ -57,7 +62,7 @@ class PlayvidIE(InfoExtractor):
             val = videovars_match.group(2)

             if key == 'title':
-                video_title = urllib.parse.unquote_plus(val)
+                video_title = compat_urllib_parse_unquote_plus(val)
             if key == 'duration':
                 try:
                     duration = int(val)

View File

@@ -1,5 +1,8 @@
 from .common import InfoExtractor
-from ..compat import compat_b64decode
+from ..compat import (
+    compat_b64decode,
+    compat_chr,
+)
 from ..utils import int_or_none
@@ -47,7 +50,7 @@ class PopcorntimesIE(InfoExtractor):
                 c_ord += 13
                 if upper < c_ord:
                     c_ord -= 26
-            loc_b64 += chr(c_ord)
+            loc_b64 += compat_chr(c_ord)

         video_url = compat_b64decode(loc_b64).decode('utf-8')

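The chr-to-compat_chr hunk sits inside a plain ROT13 loop: the video URL is base64 whose alphabetic characters were rotated 13 places within their case range. The per-character step, sketched standalone and checked against the stdlib rot13 codec:

    import codecs


    def rot13_char(c):
        for lower, upper in ((ord('a'), ord('z')), (ord('A'), ord('Z'))):
            c_ord = ord(c)
            if lower <= c_ord <= upper:
                c_ord += 13
                if upper < c_ord:    # wrap around within the case range
                    c_ord -= 26
                return chr(c_ord)
        return c                     # digits, '+', '/', '=' pass through


    assert ''.join(map(rot13_char, 'Uryyb')) == codecs.decode('Uryyb', 'rot13') == 'Hello'
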
View File

@@ -3,26 +3,29 @@ import itertools
 import math
 import operator
 import re
-import urllib.request

 from .common import InfoExtractor
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+    compat_urllib_request,
+)
 from .openload import PhantomJSwrapper
-from ..compat import compat_HTTPError, compat_str
 from ..utils import (
-    NO_DEFAULT,
-    ExtractorError,
     clean_html,
     determine_ext,
+    ExtractorError,
     format_field,
     int_or_none,
     merge_dicts,
+    NO_DEFAULT,
     orderedSet,
     remove_quotes,
     remove_start,
     str_to_int,
     update_url_query,
-    url_or_none,
     urlencode_postdata,
+    url_or_none,
 )
@@ -47,7 +50,7 @@ class PornHubBaseIE(InfoExtractor):
                     r'document\.location\.reload\(true\)')):
                 url_or_request = args[0]
                 url = (url_or_request.get_full_url()
-                       if isinstance(url_or_request, urllib.request.Request)
+                       if isinstance(url_or_request, compat_urllib_request.Request)
                       else url_or_request)
                 phantom = PhantomJSwrapper(self, required_version='2.0')
                 phantom.get(url, html=webpage)

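The PornHub hunk (and the Udemy one further down) touches the same idiom: helper code that accepts either a plain URL string or a urllib Request object and normalises to the string form before use. A sketch of the pattern:

    import urllib.request


    def full_url(url_or_request):
        # Request objects carry their URL; plain strings are returned as-is.
        if isinstance(url_or_request, urllib.request.Request):
            return url_or_request.get_full_url()
        return url_or_request


    assert full_url('https://example.com/a') == 'https://example.com/a'
    assert full_url(urllib.request.Request('https://example.com/a')) == 'https://example.com/a'
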
View File

@@ -1,6 +1,9 @@
 from .prosiebensat1 import ProSiebenSat1BaseIE
-from ..compat import compat_str
-from ..utils import parse_duration, unified_strdate
+from ..utils import (
+    unified_strdate,
+    parse_duration,
+    compat_str,
+)


 class Puls4IE(ProSiebenSat1BaseIE):

View File

@@ -1,12 +1,14 @@
 import base64
 import io
-import struct

 from .common import InfoExtractor
-from ..compat import compat_b64decode
+from ..compat import (
+    compat_b64decode,
+    compat_struct_unpack,
+)
 from ..utils import (
-    ExtractorError,
     determine_ext,
+    ExtractorError,
     float_or_none,
     qualities,
     remove_end,
@@ -71,7 +73,7 @@ class RTVEALaCartaIE(InfoExtractor):
     def _decrypt_url(png):
         encrypted_data = io.BytesIO(compat_b64decode(png)[8:])
         while True:
-            length = struct.unpack('!I', encrypted_data.read(4))[0]
+            length = compat_struct_unpack('!I', encrypted_data.read(4))[0]
             chunk_type = encrypted_data.read(4)
             if chunk_type == b'IEND':
                 break

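_decrypt_url walks a base64-decoded PNG chunk by chunk: after the 8-byte signature, every chunk is a 4-byte big-endian length, a 4-byte type, the payload, and a CRC, with IEND terminating the stream. A sketch of that walk, assuming well-formed PNG input:

    import io
    import struct


    def iter_png_chunks(data):
        stream = io.BytesIO(data[8:])          # skip the 8-byte PNG signature
        while True:
            length = struct.unpack('!I', stream.read(4))[0]
            chunk_type = stream.read(4)
            payload = stream.read(length)
            stream.read(4)                     # per-chunk CRC, not verified here
            yield chunk_type, payload
            if chunk_type == b'IEND':          # last chunk in any valid PNG
                break
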
View File

@@ -1,8 +1,11 @@
-import urllib.request

 from .common import InfoExtractor
-from ..compat import compat_parse_qs
-from ..utils import ExtractorError
+from ..compat import (
+    compat_parse_qs,
+    compat_urllib_request,
+)
+from ..utils import (
+    ExtractorError,
+)


 class ScreencastIE(InfoExtractor):
@@ -72,7 +75,7 @@ class ScreencastIE(InfoExtractor):
             flash_vars_s = flash_vars_s.replace(',', '&')
             if flash_vars_s:
                 flash_vars = compat_parse_qs(flash_vars_s)
-                video_url_raw = urllib.request.quote(
+                video_url_raw = compat_urllib_request.quote(
                     flash_vars['content'][0])
                 video_url = video_url_raw.replace('http%3A', 'http:')

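A side note on this hunk: quote lives canonically in urllib.parse; urllib.request (and hence the compat alias) merely happens to re-export it as an implementation detail, which is why both spellings run. The replace() afterwards undoes the quoting of the scheme separator. A sketch with a hypothetical flashvars 'content' value:

    import urllib.parse

    content = 'http://media.screencast.com/some clip.flv'
    video_url_raw = urllib.parse.quote(content)   # 'http%3A//...some%20clip.flv'
    video_url = video_url_raw.replace('http%3A', 'http:')
    assert video_url == 'http://media.screencast.com/some%20clip.flv'
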
View File

@@ -1,13 +1,14 @@
-import urllib.parse

 from .common import InfoExtractor
-from ..compat import compat_b64decode
+from ..compat import (
+    compat_b64decode,
+    compat_urllib_parse_unquote_plus,
+)
 from ..utils import (
-    KNOWN_EXTENSIONS,
-    ExtractorError,
     determine_ext,
+    ExtractorError,
     int_or_none,
     js_to_json,
+    KNOWN_EXTENSIONS,
     parse_filesize,
     rot47,
     url_or_none,
@@ -129,7 +130,7 @@ class VivoIE(SharedBaseIE):
             return stream_url

         def decode_url(encoded_url):
-            return rot47(urllib.parse.unquote_plus(encoded_url))
+            return rot47(compat_urllib_parse_unquote_plus(encoded_url))

         return decode_url(self._parse_json(
             self._search_regex(
View File
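decode_url undoes two layers: percent-plus encoding, then ROT47, which rotates the 94 printable ASCII characters by 47 positions and is its own inverse. A standalone sketch of the pair (this rot47 is equivalent in effect to the helper imported above):

    import urllib.parse


    def rot47(s):
        # Rotate printable ASCII 33..126 by 47: 33 + (ord(c) - 33 + 47) % 94.
        return ''.join(
            chr(33 + (ord(c) + 14) % 94) if 33 <= ord(c) <= 126 else c
            for c in s)


    def decode_url(encoded_url):
        return rot47(urllib.parse.unquote_plus(encoded_url))


    assert rot47(rot47('https://vivo.sx/stream')) == 'https://vivo.sx/stream'
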

@ -1,6 +1,6 @@
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import compat_str
from ..utils import ( from ..utils import (
compat_str,
float_or_none, float_or_none,
int_or_none, int_or_none,
smuggle_url, smuggle_url,

View File

@@ -1,12 +1,16 @@
 import re
-import urllib.request

 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str, compat_urlparse
+from ..compat import (
+    compat_HTTPError,
+    compat_str,
+    compat_urllib_request,
+    compat_urlparse,
+)
 from ..utils import (
-    ExtractorError,
     determine_ext,
     extract_attributes,
+    ExtractorError,
     float_or_none,
     int_or_none,
     js_to_json,
@@ -151,7 +155,7 @@ class UdemyIE(InfoExtractor):
                 headers['X-Udemy-Bearer-Token'] = cookie.value
                 headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value

-        if isinstance(url_or_request, urllib.request.Request):
+        if isinstance(url_or_request, compat_urllib_request.Request):
             for header, value in headers.items():
                 url_or_request.add_header(header, value)
         else:

View File

@@ -1,7 +1,10 @@
-import urllib.parse

 from .common import InfoExtractor
-from ..utils import unified_strdate
+from ..compat import (
+    compat_urllib_parse,
+)
+from ..utils import (
+    unified_strdate,
+)


 class UrortIE(InfoExtractor):
@@ -28,7 +31,7 @@ class UrortIE(InfoExtractor):
     def _real_extract(self, url):
         playlist_id = self._match_id(url)

-        fstr = urllib.parse.quote("InternalBandUrl eq '%s'" % playlist_id)
+        fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
         json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
         songs = self._download_json(json_url, playlist_id)
         entries = []

View File

@@ -1,10 +1,8 @@
 import random
 import re
 import string
-import struct

 from .common import InfoExtractor
-from ..compat import compat_b64decode, compat_ord
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -16,6 +14,11 @@ from ..utils import (
     xpath_element,
     xpath_text,
 )
+from ..compat import (
+    compat_b64decode,
+    compat_ord,
+    compat_struct_pack,
+)


 class VideaIE(InfoExtractor):
@@ -99,7 +102,7 @@ class VideaIE(InfoExtractor):
             j = (j + S[i]) % 256
             S[i], S[j] = S[j], S[i]
             k = S[(S[i] + S[j]) % 256]
-            res += struct.pack('B', k ^ compat_ord(cipher_text[m]))
+            res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))

         return res.decode()

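The Videa hunk lands in the PRGA half of a textbook RC4 implementation (key scheduling sits just above it in the file). The whole routine as a standalone sketch over str input; decoding at the end assumes the decrypted payload is UTF-8 text, as it is for the stream URLs this extractor handles:

    import struct


    def rc4(cipher_text, key):
        # Key-scheduling algorithm (KSA)
        S = list(range(256))
        j = 0
        for i in range(256):
            j = (j + S[i] + ord(key[i % len(key)])) % 256
            S[i], S[j] = S[j], S[i]

        # Pseudo-random generation algorithm (PRGA), XORed into the input
        res = b''
        i = j = 0
        for ch in cipher_text:
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            k = S[(S[i] + S[j]) % 256]
            res += struct.pack('B', k ^ ord(ch))
        return res.decode()
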
View File

@@ -1,14 +1,17 @@
 import base64
+import json
 import hashlib
 import hmac
-import json
 import random
 import string
 import time
-import urllib.parse

 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_urllib_parse_urlencode
+from ..compat import (
+    compat_HTTPError,
+    compat_urllib_parse_urlencode,
+    compat_urllib_parse,
+)
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -43,12 +46,12 @@ class VRVBaseIE(InfoExtractor):
             headers['Content-Type'] = 'application/json'
         base_string = '&'.join([
             'POST' if data else 'GET',
-            urllib.parse.quote(base_url, ''),
-            urllib.parse.quote(encoded_query, '')])
+            compat_urllib_parse.quote(base_url, ''),
+            compat_urllib_parse.quote(encoded_query, '')])
         oauth_signature = base64.b64encode(hmac.new(
             (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
             base_string.encode(), hashlib.sha1).digest()).decode()
-        encoded_query += '&oauth_signature=' + urllib.parse.quote(oauth_signature, '')
+        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
         try:
             return self._download_json(
                 '?'.join([base_url, encoded_query]), video_id,

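The VRV hunk rewrites the percent-quoting inside an OAuth 1.0-style request signer: method, URL and query are quoted with an empty safe set, joined with '&', and HMAC-SHA1'd under the concatenated secrets. Pulled out as a sketch, with illustrative parameter names:

    import base64
    import hashlib
    import hmac
    import urllib.parse


    def oauth_signature(method, base_url, encoded_query, oauth_secret, token_secret):
        base_string = '&'.join([
            method,
            urllib.parse.quote(base_url, ''),       # safe='' quotes '/' and ':' too
            urllib.parse.quote(encoded_query, '')])
        key = (oauth_secret + '&' + token_secret).encode('ascii')
        digest = hmac.new(key, base_string.encode(), hashlib.sha1).digest()
        return base64.b64encode(digest).decode()
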
Some files were not shown because too many files have changed in this diff.