Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-15 13:43:04 +00:00)

Compare commits
No commits in common. "2cb19820430aa8f7fe8cef11203d9f98388ef8ab" and "4ce05f57599961c853253398b993c94efb504048" have entirely different histories.
Comparing 2cb1982043 ... 4ce05f5759
.github/workflows/core.yml (vendored, 9 changed lines)
@@ -10,15 +10,12 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         # CPython 3.9 is in quick-test
-        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
+        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8, pypy-3.9]
         run-tests-ext: [sh]
         include:
-        # atleast one of each CPython/PyPy tests must be in windows
+        # atleast one of the tests must be in windows
         - os: windows-latest
-          python-version: '3.8'
-          run-tests-ext: bat
-        - os: windows-latest
-          python-version: pypy-3.9
+          python-version: 3.8
           run-tests-ext: bat
     steps:
     - uses: actions/checkout@v2
.github/workflows/download.yml (vendored, 8 changed lines)
@@ -9,15 +9,11 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.6', '3.7', '3.9', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
+        python-version: ['3.6', '3.7', '3.9', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8, pypy-3.9]
         run-tests-ext: [sh]
         include:
-        # atleast one of each CPython/PyPy tests must be in windows
         - os: windows-latest
-          python-version: '3.8'
-          run-tests-ext: bat
-        - os: windows-latest
-          python-version: pypy-3.9
+          python-version: 3.8
           run-tests-ext: bat
     steps:
     - uses: actions/checkout@v2
README.md
@@ -325,7 +325,6 @@ You can also fork the project on github and run your fork's [build workflow](.gi
     -h, --help                      Print this help text and exit
     --version                       Print program version and exit
     -U, --update                    Update this program to latest version
-    --no-update                     Do not update (default)
     -i, --ignore-errors             Ignore download and postprocessing errors.
                                     The download will be considered successful
                                     even if the postprocessing fails
yt_dlp/YoutubeDL.py
@@ -3060,7 +3060,7 @@ class YoutubeDL:

         success = True
         merger, fd = FFmpegMergerPP(self), None
-        if info_dict.get('protocol') or info_dict.get('url'):
+        if info_dict.get('url'):
             fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
             if fd is not FFmpegFD and (
                     info_dict.get('section_start') or info_dict.get('section_end')):
yt_dlp/extractor/common.py
@@ -1419,10 +1419,6 @@ class InfoExtractor:
             'ViewAction': 'view',
         }

-        def is_type(e, *expected_types):
-            type = variadic(traverse_obj(e, '@type'))
-            return any(x in type for x in expected_types)
-
        def extract_interaction_type(e):
            interaction_type = e.get('interactionType')
            if isinstance(interaction_type, dict):
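For reference, the is_type helper removed in this hunk normalises a JSON-LD node's @type, which may be a single string or a list of strings, before matching it against the expected types. A minimal standalone sketch of that behaviour, using a simplified stand-in for yt-dlp's variadic/traverse_obj utilities (the _variadic stand-in and the sample nodes below are illustrative, not the real helpers):

    def _variadic(x):
        # Simplified stand-in for yt_dlp.utils.variadic: wrap a lone value in a tuple.
        return x if isinstance(x, (list, tuple)) else (x,)

    def is_type(e, *expected_types):
        # A JSON-LD @type may be 'VideoObject' or a list like ['NewsArticle', 'VideoObject'].
        node_type = _variadic(e.get('@type') if isinstance(e, dict) else None)
        return any(x in node_type for x in expected_types)

    print(is_type({'@type': 'VideoObject'}, 'VideoObject'))                   # True
    print(is_type({'@type': ['NewsArticle', 'VideoObject']}, 'VideoObject'))  # True
    print(is_type({'@type': 'NewsArticle'}, 'TVEpisode', 'Episode'))          # False
    print(is_type('not a dict', 'VideoObject'))                               # False

The '+' side of the hunks that follow spells these isinstance/@type checks out at each call site instead.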
@@ -1436,7 +1432,9 @@ class InfoExtractor:
             if not isinstance(interaction_statistic, list):
                 return
             for is_e in interaction_statistic:
-                if not is_type(is_e, 'InteractionCounter'):
+                if not isinstance(is_e, dict):
+                    continue
+                if is_e.get('@type') != 'InteractionCounter':
                     continue
                 interaction_type = extract_interaction_type(is_e)
                 if not interaction_type:
@@ -1473,7 +1471,7 @@ class InfoExtractor:
                 info['chapters'] = chapters

         def extract_video_object(e):
-            assert is_type(e, 'VideoObject')
+            assert e['@type'] == 'VideoObject'
             author = e.get('author')
             info.update({
                 'url': traverse_obj(e, 'contentUrl', 'embedUrl', expected_type=url_or_none),
@@ -1505,12 +1503,13 @@ class InfoExtractor:
             if at_top_level and set(e.keys()) == {'@context', '@graph'}:
                 traverse_json_ld(variadic(e['@graph'], allowed_types=(dict,)), at_top_level=False)
                 break
-            if expected_type is not None and not is_type(e, expected_type):
+            item_type = e.get('@type')
+            if expected_type is not None and expected_type != item_type:
                 continue
             rating = traverse_obj(e, ('aggregateRating', 'ratingValue'), expected_type=float_or_none)
             if rating is not None:
                 info['average_rating'] = rating
-            if is_type(e, 'TVEpisode', 'Episode'):
+            if item_type in ('TVEpisode', 'Episode'):
                 episode_name = unescapeHTML(e.get('name'))
                 info.update({
                     'episode': episode_name,
@@ -1520,39 +1519,39 @@ class InfoExtractor:
                 if not info.get('title') and episode_name:
                     info['title'] = episode_name
                 part_of_season = e.get('partOfSeason')
-                if is_type(part_of_season, 'TVSeason', 'Season', 'CreativeWorkSeason'):
+                if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                     info.update({
                         'season': unescapeHTML(part_of_season.get('name')),
                         'season_number': int_or_none(part_of_season.get('seasonNumber')),
                     })
                 part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
-                if is_type(part_of_series, 'TVSeries', 'Series', 'CreativeWorkSeries'):
+                if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                     info['series'] = unescapeHTML(part_of_series.get('name'))
-            elif is_type(e, 'Movie'):
+            elif item_type == 'Movie':
                 info.update({
                     'title': unescapeHTML(e.get('name')),
                     'description': unescapeHTML(e.get('description')),
                     'duration': parse_duration(e.get('duration')),
                     'timestamp': unified_timestamp(e.get('dateCreated')),
                 })
-            elif is_type(e, 'Article', 'NewsArticle'):
+            elif item_type in ('Article', 'NewsArticle'):
                 info.update({
                     'timestamp': parse_iso8601(e.get('datePublished')),
                     'title': unescapeHTML(e.get('headline')),
                     'description': unescapeHTML(e.get('articleBody') or e.get('description')),
                 })
-                if is_type(traverse_obj(e, ('video', 0)), 'VideoObject'):
+                if traverse_obj(e, ('video', 0, '@type')) == 'VideoObject':
                     extract_video_object(e['video'][0])
-                elif is_type(traverse_obj(e, ('subjectOf', 0)), 'VideoObject'):
+                elif traverse_obj(e, ('subjectOf', 0, '@type')) == 'VideoObject':
                     extract_video_object(e['subjectOf'][0])
-            elif is_type(e, 'VideoObject'):
+            elif item_type == 'VideoObject':
                 extract_video_object(e)
                 if expected_type is None:
                     continue
                 else:
                     break
             video = e.get('video')
-            if is_type(video, 'VideoObject'):
+            if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                 extract_video_object(video)
             if expected_type is None:
                 continue
yt_dlp/extractor/generic.py
@@ -2563,56 +2563,6 @@ class GenericIE(InfoExtractor):
                 }
             }]
         },
-        {
-            'note': 'Rumble embed',
-            'url': 'https://rumble.com/vdmum1-moose-the-dog-helps-girls-dig-a-snow-fort.html',
-            'md5': '53af34098a7f92c4e51cf0bd1c33f009',
-            'info_dict': {
-                'id': 'vb0ofn',
-                'ext': 'mp4',
-                'timestamp': 1612662578,
-                'uploader': 'LovingMontana',
-                'channel': 'LovingMontana',
-                'upload_date': '20210207',
-                'title': 'Winter-loving dog helps girls dig a snow fort ',
-                'channel_url': 'https://rumble.com/c/c-546523',
-                'thumbnail': 'https://sp.rmbl.ws/s8/1/5/f/x/x/5fxxb.OvCc.1-small-Moose-The-Dog-Helps-Girls-D.jpg',
-                'duration': 103,
-            }
-        },
-        {
-            'note': 'Rumble JS embed',
-            'url': 'https://therightscoop.com/what-does-9-plus-1-plus-1-equal-listen-to-this-audio-of-attempted-kavanaugh-assassins-call-and-youll-get-it',
-            'md5': '4701209ac99095592e73dbba21889690',
-            'info_dict': {
-                'id': 'v15eqxl',
-                'ext': 'mp4',
-                'channel': 'Mr Producer Media',
-                'duration': 92,
-                'title': '911 Audio From The Man Who Wanted To Kill Supreme Court Justice Kavanaugh',
-                'channel_url': 'https://rumble.com/c/RichSementa',
-                'thumbnail': 'https://sp.rmbl.ws/s8/1/P/j/f/A/PjfAe.OvCc-small-911-Audio-From-The-Man-Who-.jpg',
-                'timestamp': 1654892716,
-                'uploader': 'Mr Producer Media',
-                'upload_date': '20220610',
-            }
-        },
-        {
-            'note': 'JSON LD with multiple @type',
-            'url': 'https://www.nu.nl/280161/video/hoe-een-bladvlo-dit-verwoestende-japanse-onkruid-moet-vernietigen.html',
-            'md5': 'c7949f34f57273013fb7ccb1156393db',
-            'info_dict': {
-                'id': 'ipy2AcGL',
-                'ext': 'mp4',
-                'description': 'md5:6a9d644bab0dc2dc06849c2505d8383d',
-                'thumbnail': r're:https://media\.nu\.nl/m/.+\.jpg',
-                'title': 'Hoe een bladvlo dit verwoestende Japanse onkruid moet vernietigen',
-                'timestamp': 1586577474,
-                'upload_date': '20200411',
-                'age_limit': 0,
-                'duration': 111.0,
-            }
-        },
     ]

     def report_following_redirect(self, new_url):
@@ -3941,10 +3891,15 @@ class GenericIE(InfoExtractor):
         json_ld = self._search_json_ld(webpage, video_id, default={})
         if json_ld.get('url') not in (url, None):
             self.report_detected('JSON LD')
-            return merge_dicts({
-                '_type': 'url_transparent',
-                'url': smuggle_url(json_ld['url'], {'force_videoid': video_id, 'to_generic': True}),
-            }, json_ld, info_dict)
+            if determine_ext(json_ld['url']) == 'm3u8':
+                json_ld['formats'], json_ld['subtitles'] = self._extract_m3u8_formats_and_subtitles(
+                    json_ld['url'], video_id, 'mp4')
+                json_ld.pop('url')
+                self._sort_formats(json_ld['formats'])
+            else:
+                json_ld['_type'] = 'url_transparent'
+                json_ld['url'] = smuggle_url(json_ld['url'], {'force_videoid': video_id, 'to_generic': True})
+            return merge_dicts(json_ld, info_dict)

         def check_video(vurl):
             if YoutubeIE.suitable(vurl):
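The '+' version branches on the URL extension: HLS manifests are expanded into formats in place, anything else is handed off as a url_transparent result. A rough standalone sketch of that branching (the _ext helper and the URLs below are made-up simplifications, not yt-dlp's determine_ext):

    from urllib.parse import urlparse

    def _ext(url):
        # Crude stand-in for yt-dlp's determine_ext: extension of the URL path, if any.
        path = urlparse(url).path
        return path.rpartition('.')[2] if '.' in path else ''

    for candidate in ('https://example.com/stream/master.m3u8?token=x',
                      'https://example.com/video/clip.mp4'):
        if _ext(candidate) == 'm3u8':
            print(candidate, '-> extract formats/subtitles from the HLS manifest')
        else:
            print(candidate, '-> hand off as a url_transparent result')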
yt_dlp/extractor/jwplatform.py
@@ -5,7 +5,7 @@ from ..utils import unsmuggle_url


 class JWPlatformIE(InfoExtractor):
-    _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview|manifest)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
+    _VALID_URL = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
     _TESTS = [{
         'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
         'md5': 'fa8899fa601eb7c83a64e9d568bdf325',
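The only change here is dropping 'manifest' from the path alternatives of _VALID_URL. A quick check of what that affects, using plain re outside yt-dlp (the manifest URL below is a hypothetical example of the URL shape involved):

    import re

    OLD = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview|manifest)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
    NEW = r'(?:https?://(?:content\.jwplatform|cdn\.jwplayer)\.com/(?:(?:feed|player|thumb|preview)s|jw6|v2/media)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'

    url = 'https://cdn.jwplayer.com/manifests/nPripu9l.m3u8'  # hypothetical manifest URL
    print(bool(re.match(OLD, url)))  # True  (the '-' pattern matches manifest URLs)
    print(bool(re.match(NEW, url)))  # False (the '+' pattern no longer does)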
yt_dlp/extractor/rumble.py
@@ -50,14 +50,13 @@ class RumbleEmbedIE(InfoExtractor):
         'only_matching': True,
     }]

-    @classmethod
-    def _extract_urls(cls, webpage):
-        embeds = tuple(re.finditer(
-            fr'(?:<(?:script|iframe)[^>]+\bsrc=|["\']embedUrl["\']\s*:\s*)["\'](?P<url>{cls._VALID_URL})', webpage))
-        if embeds:
-            return [mobj.group('url') for mobj in embeds]
-        return [f'https://rumble.com/embed/{mobj.group("id")}' for mobj in re.finditer(
-            r'<script>\s*Rumble\(\s*"play"\s*,\s*{\s*[\'"]video[\'"]\s*:\s*[\'"](?P<id>[0-9a-z]+)[\'"]', webpage)]
+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'(?:<(?:script|iframe)[^>]+\bsrc=|["\']embedUrl["\']\s*:\s*)["\'](?P<url>%s)' % RumbleEmbedIE._VALID_URL,
+                webpage)]

     def _real_extract(self, url):
         video_id = self._match_id(url)
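Both versions rely on the same re.finditer scan for iframe/script src or embedUrl attributes; the '-' version additionally falls back to JS "Rumble('play', ...)" calls. A standalone illustration of the basic scan (the HTML fragment and the simplified VALID_URL below are made up for the example; the real pattern is broader):

    import re

    VALID_URL = r'https?://rumble\.com/embed/(?P<id>[0-9a-z]+)'  # simplified
    webpage = '<iframe src="https://rumble.com/embed/vb0ofn/?pub=4" width="640"></iframe>'

    embeds = tuple(re.finditer(
        fr'(?:<(?:script|iframe)[^>]+\bsrc=|["\']embedUrl["\']\s*:\s*)["\'](?P<url>{VALID_URL})', webpage))
    print([mobj.group('url') for mobj in embeds])  # ['https://rumble.com/embed/vb0ofn']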
yt_dlp/extractor/zdf.py
@@ -69,7 +69,7 @@ class ZDFBaseIE(InfoExtractor):
                 f.update({
                     'url': format_url,
                     'format_id': join_nonempty('http', meta.get('type'), meta.get('quality')),
-                    'tbr': int_or_none(self._search_regex(r'_(\d+)k_', format_url, 'tbr', default=None))
+                    'tbr': int_or_none(self._search_regex(r'_(\d+)k_', format_url, default=None))
                 })
                 new_formats = [f]
             formats.extend(merge_dicts(f, {
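In both versions the bitrate is read from a '_<kbps>k_' token in the format URL; for illustration with plain re (the URL below is a made-up shape, not a real ZDF link):

    import re

    format_url = 'https://example.invalid/zdf/video_3328k_p36v12.mp4'  # made-up URL shape
    m = re.search(r'_(\d+)k_', format_url)
    print(int(m.group(1)) if m else None)  # 3328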
yt_dlp/options.py
@@ -312,10 +312,6 @@ def create_parser():
        '-U', '--update',
        action='store_true', dest='update_self',
        help='Update this program to latest version')
-    general.add_option(
-        '--no-update',
-        action='store_false', dest='update_self',
-        help='Do not update (default)')
    general.add_option(
        '-i', '--ignore-errors',
        action='store_true', dest='ignoreerrors',
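The removed pair works through optparse's store_true/store_false actions on a shared dest, so the last flag on the command line wins. A minimal sketch outside yt-dlp's own option machinery:

    import optparse

    parser = optparse.OptionParser()
    general = optparse.OptionGroup(parser, 'General Options')
    general.add_option(
        '-U', '--update',
        action='store_true', dest='update_self',
        help='Update this program to latest version')
    general.add_option(
        '--no-update',
        action='store_false', dest='update_self',
        help='Do not update (default)')
    parser.add_option_group(general)

    opts, _ = parser.parse_args(['--update', '--no-update'])
    print(opts.update_self)  # False: the later flag overrides the earlier one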
yt_dlp/utils.py
@@ -2003,8 +2003,7 @@ if sys.platform == 'win32':
        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
                          0, whole_low, whole_high, f._lock_file_overlapped_p):
-            # NB: No argument form of "ctypes.FormatError" does not work on PyPy
-            raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}')
+            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        assert f._lock_file_overlapped_p
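The '-' lines pass the error code explicitly because, as the removed comment notes, the no-argument form of ctypes.FormatError does not work on PyPy; the '+' line goes back to the implicit form. A Windows-only sketch of the two call forms, guarded so it is harmless elsewhere:

    import ctypes
    import sys

    if sys.platform == 'win32':
        code = ctypes.GetLastError()
        print(ctypes.FormatError(code))  # explicit error code: also usable on PyPy
        print(ctypes.FormatError())      # implicit GetLastError(): the form the comment warns about
    else:
        print('ctypes.FormatError/GetLastError are Windows-only APIs')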