mirror of https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-15 21:53:21 +00:00

Compare commits

No commits in common. "f1e2d4a9a21a17c0cc8132b248b81092aeb88206" and "0b5583b112d418ba4d4eefcde1cd4d54ab95458a" have entirely different histories.

f1e2d4a9a2 ... 0b5583b112

15  .github/ISSUE_TEMPLATE/1_broken_site.yml
@@ -2,13 +2,6 @@ name: Broken site
 description: Report broken or misfunctioning site
 labels: [triage, site-bug]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -18,7 +11,7 @@ body:
       options:
         - label: I'm reporting a broken site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -62,7 +55,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -70,8 +63,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell
     validations:
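
The verbose log these templates ask for is exactly what yt-dlp prints with the `-vU` flags shown in the placeholder above. A minimal sketch of capturing it for a report (the `test:youtube` pseudo-URL comes from the placeholder itself; any failing URL can be substituted):

    # -v: verbose debug output; -U: check for updates first; tee keeps a copy for the issue
    yt-dlp -vU test:youtube 2>&1 | tee verbose.log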

.github/ISSUE_TEMPLATE/2_site_support_request.yml

@@ -2,13 +2,6 @@ name: Site support request
 description: Request support for a new site
 labels: [triage, site-request]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -18,7 +11,7 @@ body:
       options:
         - label: I'm reporting a new site support request
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -74,7 +67,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -82,8 +75,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell
     validations:

.github/ISSUE_TEMPLATE/3_site_feature_request.yml

@@ -2,13 +2,6 @@ name: Site feature request
 description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -18,7 +11,7 @@ body:
       options:
         - label: I'm requesting a site-specific feature
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -70,7 +63,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -78,8 +71,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell
     validations:

15  .github/ISSUE_TEMPLATE/4_bug_report.yml
@@ -2,13 +2,6 @@ name: Bug report
 description: Report a bug unrelated to any particular site or extractor
 labels: [triage, bug]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -18,7 +11,7 @@ body:
       options:
         - label: I'm reporting a bug unrelated to a specific site
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
           required: true
@@ -55,7 +48,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -63,8 +56,8 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell
     validations:

15  .github/ISSUE_TEMPLATE/5_feature_request.yml
@@ -2,13 +2,6 @@ name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: checkboxes
     id: checklist
     attributes:
@@ -20,7 +13,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
           required: true
@@ -51,7 +44,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -59,7 +52,7 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell

17  .github/ISSUE_TEMPLATE/6_question.yml
@@ -2,19 +2,12 @@ name: Ask question
 description: Ask yt-dlp related question
 labels: [question]
 body:
-  - type: checkboxes
-    attributes:
-      label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-      description: Fill all fields even if you think it is irrelevant for the issue
-      options:
-        - label: I understand that I will be **blocked** if I remove or skip any mandatory\* field
-          required: true
   - type: markdown
     attributes:
       value: |
         ### Make sure you are **only** asking a question and not reporting a bug or requesting a feature.
         If your question contains "isn't working" or "can you add", this is most likely the wrong template.
-        If you are in doubt whether this is the right template, **USE ANOTHER TEMPLATE**!
+        If you are in doubt whether this is the right template, **use another template**!
   - type: checkboxes
     id: checklist
     attributes:
@@ -26,7 +19,7 @@ body:
           required: true
         - label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
           required: true
-        - label: I've verified that I'm running yt-dlp version **2022.08.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
+        - label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
           required: true
         - label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
           required: true
@@ -57,7 +50,7 @@ body:
         [debug] Command-line config: ['-vU', 'test:youtube']
         [debug] Portable config "yt-dlp.conf": ['-i']
         [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
-        [debug] yt-dlp version 2022.08.08 [9d339c4] (win32_exe)
+        [debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
         [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
         [debug] Checking exe version: ffmpeg -bsfs
         [debug] Checking exe version: ffprobe -bsfs
@@ -65,7 +58,7 @@ body:
         [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
         [debug] Proxy map: {}
         [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
-        Latest version: 2022.08.08, Current version: 2022.08.08
-        yt-dlp is up to date (2022.08.08)
+        Latest version: 2022.07.18, Current version: 2022.07.18
+        yt-dlp is up to date (2022.07.18)
         <more lines>
       render: shell

.github/ISSUE_TEMPLATE_tmpl/1_broken_site.yml

@@ -2,7 +2,6 @@ name: Broken site
 description: Report broken or misfunctioning site
 labels: [triage, site-bug]
 body:
-  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:

.github/ISSUE_TEMPLATE_tmpl/2_site_support_request.yml

@@ -2,7 +2,6 @@ name: Site support request
 description: Request support for a new site
 labels: [triage, site-request]
 body:
-  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:

.github/ISSUE_TEMPLATE_tmpl/3_site_feature_request.yml

@@ -2,7 +2,6 @@ name: Site feature request
 description: Request a new functionality for a supported site
 labels: [triage, site-enhancement]
 body:
-  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:
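
The `%(no_skip)s` marker removed in these `_tmpl` hunks is a Python %-format mapping field: the repository's devscripts render the published templates from the `_tmpl` sources by substituting it. The renderer itself is not part of this diff; the one-liner below is only an illustrative sketch of that kind of substitution:

    # Hypothetical stand-in for the devscripts' template rendering
    python3 -c 'tmpl = "body:\n%(no_skip)s\n  - type: checkboxes"; print(tmpl % {"no_skip": "  # shared DO-NOT-SKIP checkbox block"})'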

1  .github/ISSUE_TEMPLATE_tmpl/4_bug_report.yml
@@ -2,7 +2,6 @@ name: Bug report
 description: Report a bug unrelated to any particular site or extractor
 labels: [triage, bug]
 body:
-  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:

.github/ISSUE_TEMPLATE_tmpl/5_feature_request.yml

@@ -2,7 +2,6 @@ name: Feature request
 description: Request a new functionality unrelated to any particular site or extractor
 labels: [triage, enhancement]
 body:
-  %(no_skip)s
   - type: checkboxes
     id: checklist
     attributes:

3  .github/ISSUE_TEMPLATE_tmpl/6_question.yml
@@ -2,13 +2,12 @@ name: Ask question
 description: Ask yt-dlp related question
 labels: [question]
 body:
-  %(no_skip)s
   - type: markdown
     attributes:
       value: |
         ### Make sure you are **only** asking a question and not reporting a bug or requesting a feature.
         If your question contains "isn't working" or "can you add", this is most likely the wrong template.
-        If you are in doubt whether this is the right template, **USE ANOTHER TEMPLATE**!
+        If you are in doubt whether this is the right template, **use another template**!
   - type: checkboxes
     id: checklist
     attributes:

31  .github/PULL_REQUEST_TEMPLATE.md
@@ -1,20 +1,3 @@
-**IMPORTANT**: PRs without the template will be CLOSED
-
-### Description of your *pull request* and other information
-
-</details>
-
-<!--
-
-Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
-
--->
-
-ADD DESCRIPTION HERE
-
-Fixes #
-
-
 <details open><summary>Template</summary> <!-- OPEN is intentional -->
 
 <!--
@@ -41,3 +24,17 @@ Fixes #
 - [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
 - [ ] Core bug fix/improvement
 - [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
+
+### Description of your *pull request* and other information
+
+</details>
+
+<!--
+
+Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
+
+-->
+
+DESCRIPTION
+
+Fixes #

390  .github/workflows/build.yml
@@ -2,17 +2,18 @@ name: Build
 on: workflow_dispatch
 
 jobs:
-  prepare:
+  create_release:
     runs-on: ubuntu-latest
     outputs:
       version_suffix: ${{ steps.version_suffix.outputs.version_suffix }}
       ytdlp_version: ${{ steps.bump_version.outputs.ytdlp_version }}
-      head_sha: ${{ steps.push_release.outputs.head_sha }}
+      upload_url: ${{ steps.create_release.outputs.upload_url }}
+      release_id: ${{ steps.create_release.outputs.id }}
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@v4
+      - uses: actions/setup-python@v2
         with:
           python-version: '3.10'
 
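
The rename above rewires the same mechanism on both sides: a step's outputs are re-exported as job-level `outputs:`, which downstream jobs read through `needs`. A reduced sketch using identifiers from this hunk (not the full workflow; the echoed URL is a placeholder):

    on: workflow_dispatch
    jobs:
      create_release:
        runs-on: ubuntu-latest
        outputs:
          upload_url: ${{ steps.create_release.outputs.upload_url }}  # job output = step output
        steps:
          - id: create_release
            run: echo "::set-output name=upload_url::https://example.invalid/upload"
      build_unix:
        needs: create_release   # dependency edge; also exposes the outputs above
        runs-on: ubuntu-latest
        steps:
          - run: echo "${{ needs.create_release.outputs.upload_url }}"  # consumed via needs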
@@ -42,15 +43,53 @@ jobs:
           PUSH_VERSION_COMMIT: ${{ secrets.PUSH_VERSION_COMMIT }}
         if: "env.PUSH_VERSION_COMMIT != ''"
         run: git push origin ${{ github.event.ref }}
+      - name: Get Changelog
+        run: |
+          changelog=$(grep -oPz '(?s)(?<=### ${{ steps.bump_version.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)' Changelog.md) || true
+          echo "changelog<<EOF" >> $GITHUB_ENV
+          echo "$changelog" >> $GITHUB_ENV
+          echo "EOF" >> $GITHUB_ENV
 
+      - name: Create Release
+        id: create_release
+        uses: actions/create-release@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          tag_name: ${{ steps.bump_version.outputs.ytdlp_version }}
+          release_name: yt-dlp ${{ steps.bump_version.outputs.ytdlp_version }}
+          commitish: ${{ steps.push_release.outputs.head_sha }}
+          draft: true
+          prerelease: false
+          body: |
+            #### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
+
+            ---
+            <details open><summary><h3>Changelog</summary>
+            <p>
+
+            ${{ env.changelog }}
+
+            </p>
+            </details>
 
 
   build_unix:
-    needs: prepare
+    needs: create_release
     runs-on: ubuntu-18.04 # Standalone executable should be built on minimum supported OS
+    outputs:
+      sha256_bin: ${{ steps.get_sha.outputs.sha256_bin }}
+      sha512_bin: ${{ steps.get_sha.outputs.sha512_bin }}
+      sha256_tar: ${{ steps.get_sha.outputs.sha256_tar }}
+      sha512_tar: ${{ steps.get_sha.outputs.sha512_tar }}
+      sha256_linux: ${{ steps.get_sha.outputs.sha256_linux }}
+      sha512_linux: ${{ steps.get_sha.outputs.sha512_linux }}
+      sha256_linux_zip: ${{ steps.get_sha.outputs.sha256_linux_zip }}
+      sha512_linux_zip: ${{ steps.get_sha.outputs.sha512_linux_zip }}
+
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
         with:
           python-version: '3.10'
       - name: Install Requirements
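
The added `Get Changelog` step uses the multiline-value syntax for the `$GITHUB_ENV` file: a `name<<DELIMITER` line, the value, then the delimiter on its own line. The same pattern in isolation (variable name and value are illustrative):

    notes=$(printf 'line one\nline two')   # stand-in for the grep over Changelog.md
    echo "notes<<EOF" >> "$GITHUB_ENV"     # open a block named "notes"
    echo "$notes" >> "$GITHUB_ENV"         # multiline value, newlines preserved
    echo "EOF" >> "$GITHUB_ENV"            # close the block; later steps see ${{ env.notes }}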
@@ -61,7 +100,7 @@ jobs:
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+          python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
           python devscripts/make_lazy_extractors.py
       - name: Build Unix executables
         run: |
@@ -72,15 +111,51 @@ jobs:
       - name: Get SHA2-SUMS
         id: get_sha
         run: |
+          echo "::set-output name=sha256_bin::$(sha256sum yt-dlp | awk '{print $1}')"
+          echo "::set-output name=sha512_bin::$(sha512sum yt-dlp | awk '{print $1}')"
+          echo "::set-output name=sha256_tar::$(sha256sum yt-dlp.tar.gz | awk '{print $1}')"
+          echo "::set-output name=sha512_tar::$(sha512sum yt-dlp.tar.gz | awk '{print $1}')"
+          echo "::set-output name=sha256_linux::$(sha256sum dist/yt-dlp_linux | awk '{print $1}')"
+          echo "::set-output name=sha512_linux::$(sha512sum dist/yt-dlp_linux | awk '{print $1}')"
+          echo "::set-output name=sha256_linux_zip::$(sha256sum dist/yt-dlp_linux.zip | awk '{print $1}')"
+          echo "::set-output name=sha512_linux_zip::$(sha512sum dist/yt-dlp_linux.zip | awk '{print $1}')"
 
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+      - name: Upload zip binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          path: |
-            yt-dlp
-            yt-dlp.tar.gz
-            dist/yt-dlp_linux
-            dist/yt-dlp_linux.zip
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./yt-dlp
+          asset_name: yt-dlp
+          asset_content_type: application/octet-stream
+      - name: Upload Source tar
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./yt-dlp.tar.gz
+          asset_name: yt-dlp.tar.gz
+          asset_content_type: application/gzip
+      - name: Upload standalone binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_linux
+          asset_name: yt-dlp_linux
+          asset_content_type: application/octet-stream
+      - name: Upload onedir binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_linux.zip
+          asset_name: yt-dlp_linux.zip
+          asset_content_type: application/zip
 
       - name: Build and publish on PyPi
         env:
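
Both sides derive the digests the same way: `sha256sum` prints `<hash>  <file>`, and `awk '{print $1}'` keeps only the hash, which the older workflow then publishes as a step output through the since-deprecated `::set-output` command. A quick sketch (hash value elided):

    $ sha256sum yt-dlp | awk '{print $1}'
    <hash>
    $ echo "::set-output name=sha256_bin::$(sha256sum yt-dlp | awk '{print $1}')"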
@@ -89,7 +164,6 @@ jobs:
         if: "env.TWINE_PASSWORD != ''"
         run: |
           rm -rf dist/*
-          python devscripts/set-variant.py pip -M "You installed yt-dlp with pip or using the wheel from PyPi; Use that to update"
           python setup.py sdist bdist_wheel
           twine upload dist/*
 
@@ -106,19 +180,24 @@ jobs:
         if: "env.BREW_TOKEN != ''"
         run: |
           git clone git@github.com:yt-dlp/homebrew-taps taps/
-          python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.prepare.outputs.ytdlp_version }}"
+          python devscripts/update-formulae.py taps/Formula/yt-dlp.rb "${{ needs.create_release.outputs.ytdlp_version }}"
           git -C taps/ config user.name github-actions
           git -C taps/ config user.email github-actions@example.com
-          git -C taps/ commit -am 'yt-dlp: ${{ needs.prepare.outputs.ytdlp_version }}'
+          git -C taps/ commit -am 'yt-dlp: ${{ needs.create_release.outputs.ytdlp_version }}'
           git -C taps/ push
 
 
   build_macos:
     runs-on: macos-11
-    needs: prepare
+    needs: create_release
+    outputs:
+      sha256_macos: ${{ steps.get_sha.outputs.sha256_macos }}
+      sha512_macos: ${{ steps.get_sha.outputs.sha512_macos }}
+      sha256_macos_zip: ${{ steps.get_sha.outputs.sha256_macos_zip }}
+      sha512_macos_zip: ${{ steps.get_sha.outputs.sha512_macos_zip }}
+
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
       # NB: In order to create a universal2 application, the version of python3 in /usr/bin has to be used
       - name: Install Requirements
         run: |
@@ -127,28 +206,50 @@ jobs:
 
       - name: Prepare
         run: |
-          /usr/bin/python3 devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+          /usr/bin/python3 devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
           /usr/bin/python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
           /usr/bin/python3 pyinst.py --target-architecture universal2 --onedir
           (cd ./dist/yt-dlp_macos && zip -r ../yt-dlp_macos.zip .)
           /usr/bin/python3 pyinst.py --target-architecture universal2
+      - name: Get SHA2-SUMS
+        id: get_sha
+        run: |
+          echo "::set-output name=sha256_macos::$(sha256sum dist/yt-dlp_macos | awk '{print $1}')"
+          echo "::set-output name=sha512_macos::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
+          echo "::set-output name=sha256_macos_zip::$(sha256sum dist/yt-dlp_macos.zip | awk '{print $1}')"
+          echo "::set-output name=sha512_macos_zip::$(sha512sum dist/yt-dlp_macos.zip | awk '{print $1}')"
 
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+      - name: Upload standalone binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          path: |
-            dist/yt-dlp_macos
-            dist/yt-dlp_macos.zip
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_macos
+          asset_name: yt-dlp_macos
+          asset_content_type: application/octet-stream
+      - name: Upload onedir binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_macos.zip
+          asset_name: yt-dlp_macos.zip
+          asset_content_type: application/zip
 
 
   build_macos_legacy:
     runs-on: macos-latest
-    needs: prepare
+    needs: create_release
+    outputs:
+      sha256_macos_legacy: ${{ steps.get_sha.outputs.sha256_macos_legacy }}
+      sha512_macos_legacy: ${{ steps.get_sha.outputs.sha512_macos_legacy }}
+
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
       - name: Install Python
       # We need the official Python, because the GA ones only support newer macOS versions
         env:
@@ -168,37 +269,52 @@ jobs:
 
       - name: Prepare
         run: |
-          python3 devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+          python3 devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
           python3 devscripts/make_lazy_extractors.py
       - name: Build
         run: |
           python3 pyinst.py
-          mv dist/yt-dlp_macos dist/yt-dlp_macos_legacy
+      - name: Get SHA2-SUMS
+        id: get_sha
+        run: |
+          echo "::set-output name=sha256_macos_legacy::$(sha256sum dist/yt-dlp_macos | awk '{print $1}')"
+          echo "::set-output name=sha512_macos_legacy::$(sha512sum dist/yt-dlp_macos | awk '{print $1}')"
 
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+      - name: Upload standalone binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          path: |
-            dist/yt-dlp_macos_legacy
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_macos
+          asset_name: yt-dlp_macos_legacy
+          asset_content_type: application/octet-stream
 
 
   build_windows:
     runs-on: windows-latest
-    needs: prepare
+    needs: create_release
+    outputs:
+      sha256_win: ${{ steps.get_sha.outputs.sha256_win }}
+      sha512_win: ${{ steps.get_sha.outputs.sha512_win }}
+      sha256_py2exe: ${{ steps.get_sha.outputs.sha256_py2exe }}
+      sha512_py2exe: ${{ steps.get_sha.outputs.sha512_py2exe }}
+      sha256_win_zip: ${{ steps.get_sha.outputs.sha256_win_zip }}
+      sha512_win_zip: ${{ steps.get_sha.outputs.sha512_win_zip }}
+
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
         with: # 3.8 is used for Win7 support
           python-version: '3.8'
       - name: Install Requirements
         run: | # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
           python -m pip install --upgrade pip setuptools wheel py2exe
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-5.2-py3-none-any.whl" -r requirements.txt
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+          python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
@@ -207,118 +323,154 @@ jobs:
           python pyinst.py
           python pyinst.py --onedir
           Compress-Archive -Path ./dist/yt-dlp/* -DestinationPath ./dist/yt-dlp_win.zip
+      - name: Get SHA2-SUMS
+        id: get_sha
+        run: |
+          echo "::set-output name=sha256_py2exe::$((Get-FileHash dist\yt-dlp_min.exe -Algorithm SHA256).Hash.ToLower())"
+          echo "::set-output name=sha512_py2exe::$((Get-FileHash dist\yt-dlp_min.exe -Algorithm SHA512).Hash.ToLower())"
+          echo "::set-output name=sha256_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA256).Hash.ToLower())"
+          echo "::set-output name=sha512_win::$((Get-FileHash dist\yt-dlp.exe -Algorithm SHA512).Hash.ToLower())"
+          echo "::set-output name=sha256_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA256).Hash.ToLower())"
+          echo "::set-output name=sha512_win_zip::$((Get-FileHash dist\yt-dlp_win.zip -Algorithm SHA512).Hash.ToLower())"
 
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+      - name: Upload py2exe binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          path: |
-            dist/yt-dlp.exe
-            dist/yt-dlp_min.exe
-            dist/yt-dlp_win.zip
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_min.exe
+          asset_name: yt-dlp_min.exe
+          asset_content_type: application/vnd.microsoft.portable-executable
+      - name: Upload standalone binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp.exe
+          asset_name: yt-dlp.exe
+          asset_content_type: application/vnd.microsoft.portable-executable
+      - name: Upload onedir binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_win.zip
+          asset_name: yt-dlp_win.zip
+          asset_content_type: application/zip
 
 
   build_windows32:
     runs-on: windows-latest
-    needs: prepare
+    needs: create_release
+    outputs:
+      sha256_win32: ${{ steps.get_sha.outputs.sha256_win32 }}
+      sha512_win32: ${{ steps.get_sha.outputs.sha512_win32 }}
+
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - uses: actions/setup-python@v2
         with: # 3.7 is used for Vista support. See https://github.com/yt-dlp/yt-dlp/issues/390
           python-version: '3.7'
           architecture: 'x86'
       - name: Install Requirements
         run: |
           python -m pip install --upgrade pip setuptools wheel
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-5.2-py3-none-any.whl" -r requirements.txt
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.10-py3-none-any.whl" -r requirements.txt
 
       - name: Prepare
         run: |
-          python devscripts/update-version.py ${{ needs.prepare.outputs.version_suffix }}
+          python devscripts/update-version.py ${{ needs.create_release.outputs.version_suffix }}
           python devscripts/make_lazy_extractors.py
       - name: Build
         run: |
           python pyinst.py
+      - name: Get SHA2-SUMS
+        id: get_sha
+        run: |
+          echo "::set-output name=sha256_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA256).Hash.ToLower())"
+          echo "::set-output name=sha512_win32::$((Get-FileHash dist\yt-dlp_x86.exe -Algorithm SHA512).Hash.ToLower())"
 
-      - name: Upload artifacts
-        uses: actions/upload-artifact@v3
+      - name: Upload standalone binary
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         with:
-          path: |
-            dist/yt-dlp_x86.exe
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./dist/yt-dlp_x86.exe
+          asset_name: yt-dlp_x86.exe
+          asset_content_type: application/vnd.microsoft.portable-executable
 
 
-  publish_release:
+  finish:
     runs-on: ubuntu-latest
-    needs: [prepare, build_unix, build_windows, build_windows32, build_macos, build_macos_legacy]
+    needs: [create_release, build_unix, build_windows, build_windows32, build_macos, build_macos_legacy]
 
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/download-artifact@v3
-
-      - name: Get Changelog
+      - name: Make SHA2-SUMS files
         run: |
-          changelog=$(grep -oPz '(?s)(?<=### ${{ steps.bump_version.outputs.ytdlp_version }}\n{2}).+?(?=\n{2,3}###)' Changelog.md) || true
-          echo "changelog<<EOF" >> $GITHUB_ENV
-          echo "$changelog" >> $GITHUB_ENV
-          echo "EOF" >> $GITHUB_ENV
+          echo "${{ needs.build_unix.outputs.sha256_bin }} yt-dlp" >> SHA2-256SUMS
+          echo "${{ needs.build_unix.outputs.sha256_tar }} yt-dlp.tar.gz" >> SHA2-256SUMS
+          echo "${{ needs.build_unix.outputs.sha256_linux }} yt-dlp_linux" >> SHA2-256SUMS
+          echo "${{ needs.build_unix.outputs.sha256_linux_zip }} yt-dlp_linux.zip" >> SHA2-256SUMS
+          echo "${{ needs.build_windows.outputs.sha256_win }} yt-dlp.exe" >> SHA2-256SUMS
+          echo "${{ needs.build_windows.outputs.sha256_py2exe }} yt-dlp_min.exe" >> SHA2-256SUMS
+          echo "${{ needs.build_windows32.outputs.sha256_win32 }} yt-dlp_x86.exe" >> SHA2-256SUMS
+          echo "${{ needs.build_windows.outputs.sha256_win_zip }} yt-dlp_win.zip" >> SHA2-256SUMS
+          echo "${{ needs.build_macos.outputs.sha256_macos }} yt-dlp_macos" >> SHA2-256SUMS
+          echo "${{ needs.build_macos.outputs.sha256_macos_zip }} yt-dlp_macos.zip" >> SHA2-256SUMS
+          echo "${{ needs.build_macos_legacy.outputs.sha256_macos_legacy }} yt-dlp_macos_legacy" >> SHA2-256SUMS
+          echo "${{ needs.build_unix.outputs.sha512_bin }} yt-dlp" >> SHA2-512SUMS
+          echo "${{ needs.build_unix.outputs.sha512_tar }} yt-dlp.tar.gz" >> SHA2-512SUMS
+          echo "${{ needs.build_unix.outputs.sha512_linux }} yt-dlp_linux" >> SHA2-512SUMS
+          echo "${{ needs.build_unix.outputs.sha512_linux_zip }} yt-dlp_linux.zip" >> SHA2-512SUMS
+          echo "${{ needs.build_windows.outputs.sha512_win }} yt-dlp.exe" >> SHA2-512SUMS
+          echo "${{ needs.build_windows.outputs.sha512_py2exe }} yt-dlp_min.exe" >> SHA2-512SUMS
+          echo "${{ needs.build_windows32.outputs.sha512_win32 }} yt-dlp_x86.exe" >> SHA2-512SUMS
+          echo "${{ needs.build_windows.outputs.sha512_win_zip }} yt-dlp_win.zip" >> SHA2-512SUMS
+          echo "${{ needs.build_macos.outputs.sha512_macos }} yt-dlp_macos" >> SHA2-512SUMS
+          echo "${{ needs.build_macos.outputs.sha512_macos_zip }} yt-dlp_macos.zip" >> SHA2-512SUMS
+          echo "${{ needs.build_macos_legacy.outputs.sha512_macos_legacy }} yt-dlp_macos_legacy" >> SHA2-512SUMS
+
+      - name: Upload SHA2-256SUMS file
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./SHA2-256SUMS
+          asset_name: SHA2-256SUMS
+          asset_content_type: text/plain
+      - name: Upload SHA2-512SUMS file
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./SHA2-512SUMS
+          asset_name: SHA2-512SUMS
+          asset_content_type: text/plain
 
       - name: Make Update spec
         run: |
           echo "# This file is used for regulating self-update" >> _update_spec
           echo "lock 2022.07.18 .+ Python 3.6" >> _update_spec
-      - name: Make SHA2-SUMS files
-        run: |
-          sha256sum artifact/yt-dlp | awk '{print $1 " yt-dlp"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp.tar.gz | awk '{print $1 " yt-dlp.tar.gz"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp.exe | awk '{print $1 " yt-dlp.exe"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_win.zip | awk '{print $1 " yt-dlp_win.zip"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_min.exe | awk '{print $1 " yt-dlp_min.exe"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_x86.exe | awk '{print $1 " yt-dlp_x86.exe"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_macos | awk '{print $1 " yt-dlp_macos"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_macos.zip | awk '{print $1 " yt-dlp_macos.zip"}' >> SHA2-256SUMS
-          sha256sum artifact/yt-dlp_macos_legacy | awk '{print $1 " yt-dlp_macos_legacy"}' >> SHA2-256SUMS
-          sha256sum artifact/dist/yt-dlp_linux | awk '{print $1 " yt-dlp_linux"}' >> SHA2-256SUMS
-          sha256sum artifact/dist/yt-dlp_linux.zip | awk '{print $1 " yt-dlp_linux.zip"}' >> SHA2-256SUMS
-          sha512sum artifact/yt-dlp | awk '{print $1 " yt-dlp"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp.tar.gz | awk '{print $1 " yt-dlp.tar.gz"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp.exe | awk '{print $1 " yt-dlp.exe"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_win.zip | awk '{print $1 " yt-dlp_win.zip"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_min.exe | awk '{print $1 " yt-dlp_min.exe"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_x86.exe | awk '{print $1 " yt-dlp_x86.exe"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_macos | awk '{print $1 " yt-dlp_macos"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_macos.zip | awk '{print $1 " yt-dlp_macos.zip"}' >> SHA2-512SUMS
-          sha512sum artifact/yt-dlp_macos_legacy | awk '{print $1 " yt-dlp_macos_legacy"}' >> SHA2-512SUMS
-          sha512sum artifact/dist/yt-dlp_linux | awk '{print $1 " yt-dlp_linux"}' >> SHA2-512SUMS
-          sha512sum artifact/dist/yt-dlp_linux.zip | awk '{print $1 " yt-dlp_linux.zip"}' >> SHA2-512SUMS
+      - name: Upload update spec
+        uses: actions/upload-release-asset@v1
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        with:
+          upload_url: ${{ needs.create_release.outputs.upload_url }}
+          asset_path: ./_update_spec
+          asset_name: _update_spec
+          asset_content_type: text/plain
 
-      - name: Publish Release
-        uses: yt-dlp/action-gh-release@v1
-        with:
-          tag_name: ${{ needs.prepare.outputs.ytdlp_version }}
-          name: yt-dlp ${{ needs.prepare.outputs.ytdlp_version }}
-          target_commitish: ${{ needs.prepare.outputs.head_sha }}
-          body: |
-            #### [A description of the various files]((https://github.com/yt-dlp/yt-dlp#release-files)) are in the README
-
-            ---
-            <details open><summary><h3>Changelog</summary>
-            <p>
-
-            ${{ env.changelog }}
-
-            </p>
-            </details>
-          files: |
-            SHA2-256SUMS
-            SHA2-512SUMS
-            artifact/yt-dlp
-            artifact/yt-dlp.tar.gz
-            artifact/yt-dlp.exe
-            artifact/yt-dlp_win.zip
-            artifact/yt-dlp_min.exe
-            artifact/yt-dlp_x86.exe
-            artifact/yt-dlp_macos
-            artifact/yt-dlp_macos.zip
-            artifact/yt-dlp_macos_legacy
-            artifact/dist/yt-dlp_linux
-            artifact/dist/yt-dlp_linux.zip
-            _update_spec
+      - name: Finalize release
+        env:
+          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        run: |
+          gh api -X PATCH -H "Accept: application/vnd.github.v3+json" \
+            /repos/${{ github.repository }}/releases/${{ needs.create_release.outputs.release_id }} \
+            -F draft=false
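
In both variants the published `SHA2-256SUMS`/`SHA2-512SUMS` files follow the `sha256sum`/`sha512sum` list format, one `<hash> <name>` pair per line, so a downloaded asset can be checked against them. For example, assuming the asset and sums file sit in the current directory:

    $ sha256sum yt-dlp | awk '{print $1}'   # compare against the yt-dlp line in SHA2-256SUMS
    <hash>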

6  .github/workflows/core.yml
@@ -10,7 +10,7 @@ jobs:
       matrix:
         os: [ubuntu-latest]
         # CPython 3.9 is in quick-test
-        python-version: ['3.7', '3.10', 3.11-dev, pypy-3.7, pypy-3.8]
+        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
         run-tests-ext: [sh]
         include:
         # atleast one of each CPython/PyPy tests must be in windows
@@ -21,9 +21,9 @@ jobs:
           python-version: pypy-3.9
           run-tests-ext: bat
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v2
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}
       - name: Install pytest
10  .github/workflows/download.yml  vendored
@@ -6,9 +6,9 @@ jobs:
     if: "contains(github.event.head_commit.message, 'ci run dl')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v2
    - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v2
      with:
        python-version: 3.9
    - name: Install test requirements
@@ -25,7 +25,7 @@ jobs:
       fail-fast: true
       matrix:
         os: [ubuntu-latest]
-        python-version: ['3.7', '3.10', 3.11-dev, pypy-3.7, pypy-3.8]
+        python-version: ['3.6', '3.7', '3.10', 3.11-dev, pypy-3.6, pypy-3.7, pypy-3.8]
         run-tests-ext: [sh]
         include:
           # atleast one of each CPython/PyPy tests must be in windows
@@ -36,9 +36,9 @@ jobs:
           python-version: pypy-3.9
           run-tests-ext: bat
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install pytest
8  .github/workflows/quick-test.yml  vendored
@@ -6,9 +6,9 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v2
    - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v2
      with:
        python-version: 3.9
    - name: Install test requirements
@@ -20,9 +20,9 @@ jobs:
     if: "!contains(github.event.head_commit.message, 'ci skip all')"
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v2
    - name: Set up Python
-      uses: actions/setup-python@v4
+      uses: actions/setup-python@v2
      with:
        python-version: 3.9
    - name: Install flake8
CONTRIBUTING.md
@@ -222,7 +222,7 @@ After you have ensured this site is distributing its content legally, you can fo

        $ flake8 yt_dlp/extractor/yourextractor.py

-1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.7 and above. Backward compatibility is not required for even older versions of Python.
+1. Make sure your code works under all [Python](https://www.python.org/) versions supported by yt-dlp, namely CPython and PyPy for Python 3.6 and above. Backward compatibility is not required for even older versions of Python.
 1. When the tests pass, [add](https://git-scm.com/docs/git-add) the new files, [commit](https://git-scm.com/docs/git-commit) them and [push](https://git-scm.com/docs/git-push) the result, like this:

        $ git add yt_dlp/extractor/_extractors.py
CONTRIBUTORS
@@ -285,12 +285,3 @@ odo2063
 pritam20ps05
 scy
 sheerluck
-AxiosDeminence
-DjesonPV
-eren-kemer
-freezboltz
-Galiley
-haobinliang
-Mehavoid
-winterbird-code
-yashkc2025
86  Changelog.md
@@ -11,92 +11,6 @@
 -->


-### 2022.08.08
-
-* **Remove Python 3.6 support**
-* Determine merge container better by [pukkandan](https://github.com/pukkandan), [selfisekai](https://github.com/selfisekai)
-* Framework for embed detection by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
-* Merge youtube-dl: Upto [commit/adb5294](https://github.com/ytdl-org/youtube-dl/commit/adb5294)
-* `--compat-option no-live-chat` should disable danmaku
-* Fix misleading DRM message
-* Import ctypes only when necessary
-* Minor bugfixes by [pukkandan](https://github.com/pukkandan)
-* Reject entire playlists faster with `--match-filter` by [pukkandan](https://github.com/pukkandan)
-* Remove filtered entries from `-J`
-* Standardize retry mechanism by [pukkandan](https://github.com/pukkandan)
-* Validate `--merge-output-format`
-* [downloader] Add average speed to final progress line
-* [extractor] Add field `audio_channels`
-* [extractor] Support multiple archive ids for one video
-* [ffmpeg] Set `ffmpeg_location` in a contextvar
-* [FFmpegThumbnailsConvertor] Fix conversion from GIF
-* [MetadataParser] Don't set `None` when the field didn't match
-* [outtmpl] Smarter replacing of unsupported characters by [pukkandan](https://github.com/pukkandan)
-* [outtmpl] Treat empty values as None in filenames
-* [utils] sanitize_open: Allow any IO stream as stdout
-* [build, devscripts] Add devscript to set a build variant
-* [build] Improve build process by [shirt-dev](https://github.com/shirt-dev)
-* [build] Update pyinstaller
-* [devscripts] Create `utils` and refactor
-* [docs] Clarify `best*`
-* [docs] Fix bug report issue template
-* [docs] Fix capitalization in references by [christoph-heinrich](https://github.com/christoph-heinrich)
-* [cleanup, mhtml] Use imghdr
-* [cleanup, utils] Consolidate known media extensions
-* [cleanup] Misc fixes and cleanup
-* [extractor/angel] Add extractor by [AxiosDeminence](https://github.com/AxiosDeminence)
-* [extractor/dplay] Add MotorTrend extractor by [Sipherdrakon](https://github.com/Sipherdrakon)
-* [extractor/harpodeon] Add extractor by [eren-kemer](https://github.com/eren-kemer)
-* [extractor/holodex] Add extractor by [pukkandan](https://github.com/pukkandan), [sqrtNOT](https://github.com/sqrtNOT)
-* [extractor/kompas] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
-* [extractor/rai] Add raisudtirol extractor by [nixxo](https://github.com/nixxo)
-* [extractor/tempo] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
-* [extractor/youtube] **Fixes for third party client detection** by [coletdjnz](https://github.com/coletdjnz)
-* [extractor/youtube] Add `live_status=post_live` by [lazypete365](https://github.com/lazypete365)
-* [extractor/youtube] Extract more format info
-* [extractor/youtube] Parse translated subtitles only when requested
-* [extractor/youtube, extractor/twitch] Allow waiting for channels to become live
-* [extractor/youtube, webvtt] Extract auto-subs from livestream VODs by [fstirlitz](https://github.com/fstirlitz), [pukkandan](https://github.com/pukkandan)
-* [extractor/AbemaTVTitle] Implement paging by [Lesmiscore](https://github.com/Lesmiscore)
-* [extractor/archiveorg] Improve handling of formats by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
-* [extractor/arte] Fix title extraction
-* [extractor/arte] **Move to v2 API** by [fstirlitz](https://github.com/fstirlitz), [pukkandan](https://github.com/pukkandan)
-* [extractor/bbc] Fix news articles by [ajj8](https://github.com/ajj8)
-* [extractor/camtasia] Separate into own extractor by [coletdjnz](https://github.com/coletdjnz)
-* [extractor/cloudflarestream] Fix video_id padding by [haobinliang](https://github.com/haobinliang)
-* [extractor/crunchyroll] Fix conversion of thumbnail from GIF by [pukkandan](https://github.com/pukkandan)
-* [extractor/crunchyroll] Handle missing metadata correctly by [Burve](https://github.com/Burve), [pukkandan](https://github.com/pukkandan)
-* [extractor/crunchyroll:beta] Extract timestamp and fix tests by [tejing1](https://github.com/tejing1)
-* [extractor/crunchyroll:beta] Use streams API by [tejing1](https://github.com/tejing1)
-* [extractor/doodstream] Support more domains by [Galiley](https://github.com/Galiley)
-* [extractor/ESPN] Extract duration by [ischmidt20](https://github.com/ischmidt20)
-* [extractor/FIFA] Change API endpoint by [Bricio](https://github.com/Bricio), [yashkc2025](https://github.com/yashkc2025)
-* [extractor/globo:article] Remove false positives by [Bricio](https://github.com/Bricio)
-* [extractor/Go] Extract timestamp by [ischmidt20](https://github.com/ischmidt20)
-* [extractor/hidive] Fix cookie login when netrc is also given by [winterbird-code](https://github.com/winterbird-code)
-* [extractor/html5] Separate into own extractor by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
-* [extractor/ina] Improve extractor by [elyse0](https://github.com/elyse0)
-* [extractor/NaverNow] Change endpoint by [ping](https://github.com/ping)
-* [extractor/ninegag] Extract uploader by [DjesonPV](https://github.com/DjesonPV)
-* [extractor/NovaPlay] Fix extractor by [Bojidarist](https://github.com/Bojidarist)
-* [extractor/orf:radio] Rewrite extractors
-* [extractor/patreon] Fix and improve extractors by [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
-* [extractor/rai] Fix RaiNews extraction by [nixxo](https://github.com/nixxo)
-* [extractor/redbee] Unify and update extractors by [elyse0](https://github.com/elyse0)
-* [extractor/stripchat] Fix _VALID_URL by [freezboltz](https://github.com/freezboltz)
-* [extractor/tubi] Exclude playlists from playlist entries by [sqrtNOT](https://github.com/sqrtNOT)
-* [extractor/tviplayer] Improve `_VALID_URL` by [HobbyistDev](https://github.com/HobbyistDev)
-* [extractor/twitch] Extract chapters for single chapter VODs by [mpeter50](https://github.com/mpeter50)
-* [extractor/vgtv] Support tv.vg.no by [sqrtNOT](https://github.com/sqrtNOT)
-* [extractor/vidio] Support embed link by [HobbyistDev](https://github.com/HobbyistDev)
-* [extractor/vk] Fix extractor by [Mehavoid](https://github.com/Mehavoid)
-* [extractor/WASDTV:record] Fix `_VALID_URL`
-* [extractor/xfileshare] Add Referer by [Galiley](https://github.com/Galiley)
-* [extractor/YahooJapanNews] Fix extractor by [Lesmiscore](https://github.com/Lesmiscore)
-* [extractor/yandexmusic] Extract higher quality format
-* [extractor/zee5] Update Device ID by [m4tu4g](https://github.com/m4tu4g)
-
-
 ### 2022.07.18

 * Allow users to specify encoding in each config files by [Lesmiscore](https://github.com/Lesmiscore)
65  README.md
@@ -71,7 +71,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t

 # NEW FEATURES

-* Merged with **youtube-dl v2021.12.17+ [commit/adb5294](https://github.com/ytdl-org/youtube-dl/commit/adb5294177265ba35b45746dbb600965076ed150)**<!--([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))--> and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)
+* Merged with **youtube-dl v2021.12.17+ [commit/a03b977](https://github.com/ytdl-org/youtube-dl/commit/a03b9775d544b06a5b4f2aa630214c7c22fc2229)**<!--([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))--> and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)

 * **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in youtube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API

@@ -105,7 +105,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t

 * **Multiple paths and output templates**: You can give different [output templates](#output-template) and download paths for different types of files. You can also set a temporary path where intermediary files are downloaded to using `--paths` (`-P`)

-* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [CONFIGURATION](#configuration) for details
+* **Portable Configuration**: Configuration files are automatically loaded from the home and root directories. See [configuration](#configuration) for details

 * **Output template improvements**: Output templates can now have date-time formatting, numeric offsets, object traversal etc. See [output template](#output-template) for details. Even more advanced operations can also be done with the help of `--parse-metadata` and `--replace-in-metadata`

@@ -127,7 +127,7 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu

 * The options `--auto-number` (`-A`), `--title` (`-t`) and `--literal` (`-l`), no longer work. See [removed options](#Removed) for details
 * `avconv` is not supported as an alternative to `ffmpeg`
-* yt-dlp stores config files in slightly different locations to youtube-dl. See [CONFIGURATION](#configuration) for a list of correct locations
+* yt-dlp stores config files in slightly different locations to youtube-dl. See [configuration](#configuration) for a list of correct locations
 * The default [output template](#output-template) is `%(title)s [%(id)s].%(ext)s`. There is no real reason for this change. This was changed before yt-dlp was ever made public and now there are no plans to change it back to `%(title)s-%(id)s.%(ext)s`. Instead, you may use `--compat-options filename`
 * The default [format sorting](#sorting-formats) is different from youtube-dl and prefers higher resolution and better codecs rather than higher bitrates. You can use the `--format-sort` option to change this to any order you prefer, or use `--compat-options format-sort` to use youtube-dl's sorting order
 * The default format selector is `bv*+ba/b`. This means that if a combined video + audio format that is better than the best video-only format is found, the former will be preferred. Use `-f bv+ba/b` or `--compat-options format-spec` to revert this
@@ -138,7 +138,8 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
 * Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
 * `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
 * The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
-* Live chats (if available) are considered as subtitles. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent any live chat/danmaku from downloading
+* All *experiences* of a funimation episode are considered as a single video. This behavior breaks existing archives. Use `--compat-options seperate-video-versions` to extract information from only the default player
+* Youtube live chat (if available) is considered as a subtitle. Use `--sub-langs all,-live_chat` to download all subtitles except live chat. You can also use `--compat-options no-live-chat` to prevent live chat from downloading
 * Youtube channel URLs are automatically redirected to `/video`. Append a `/featured` to the URL to download only the videos in the home page. If the channel does not have a videos tab, we try to download the equivalent `UU` playlist instead. For all other tabs, if the channel does not show the requested tab, an error will be raised. Also, `/live` URLs raise an error if there are no live videos instead of silently downloading the entire channel. You may use `--compat-options no-youtube-channel-redirect` to revert all these redirections
 * Unavailable videos are also listed for youtube playlists. Use `--compat-options no-youtube-unavailable-videos` to remove this
 * If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
@@ -312,7 +313,7 @@ If you do not have the necessary dependencies for a task you are attempting, yt-
 ## COMPILE

 ### Standalone PyInstaller Builds
-To build the standalone executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). Once you have all the necessary dependencies installed, simply run `pyinst.py`. The executable will be built for the same architecture (x86/ARM, 32/64 bit) as the Python used.
+To build the Windows/MacOS executable, you must have Python and `pyinstaller` (plus any of yt-dlp's [optional dependencies](#dependencies) if needed). Once you have all the necessary dependencies installed, simply run `pyinst.py`. The executable will be built for the same architecture (32/64 bit) as the Python used.

     python3 -m pip install -U pyinstaller -r requirements.txt
     python3 devscripts/make_lazy_extractors.py
@@ -343,8 +344,7 @@ If you wish to build it anyway, install Python and py2exe, and then simply run `

 ### Related scripts

-* **`devscripts/update-version.py [revision]`** - Update the version number based on current date
-* **`devscripts/set-variant.py variant [-M update_message]`** - Set the build variant of the executable
+* **`devscripts/update-version.py`** - Update the version number based on current timestamp
 * **`devscripts/make_lazy_extractors.py`** - Create lazy extractors. Running this before building the binaries (any variant) will improve their startup performance. Set the environment variable `YTDLP_NO_LAZY_EXTRACTORS=1` if you wish to forcefully disable lazy extractor loading.

 You can also fork the project on github and run your fork's [build workflow](.github/workflows/build.yml) to automatically build a full release
@@ -361,8 +361,8 @@ You can also fork the project on github and run your fork's [build workflow](.gi
 ## General Options:
     -h, --help                      Print this help text and exit
     --version                       Print program version and exit
-    -U, --update                    Update this program to the latest version
-    --no-update                     Do not check for updates (default)
+    -U, --update                    Update this program to latest version
+    --no-update                     Do not update (default)
     -i, --ignore-errors             Ignore download and postprocessing errors.
                                     The download will be considered successful
                                     even if the postprocessing fails
@@ -491,7 +491,7 @@ You can also fork the project on github and run your fork's [build workflow](.gi
     --match-filters FILTER          Generic video filter. Any "OUTPUT TEMPLATE"
                                     field can be compared with a number or a
                                     string using the operators defined in
-                                    "Filtering Formats". You can also simply
+                                    "Filtering formats". You can also simply
                                     specify a field to match if the field is
                                     present, use "!field" to check if the field
                                     is not present, and "&" to check multiple
@@ -547,14 +547,14 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                     error (default is 3), or "infinite"
     --fragment-retries RETRIES      Number of retries for a fragment (default is
                                     10), or "infinite" (DASH, hlsnative and ISM)
-    --retry-sleep [TYPE:]EXPR       Time to sleep between retries in seconds
-                                    (optionally) prefixed by the type of retry
-                                    (http (default), fragment, file_access,
-                                    extractor) to apply the sleep to. EXPR can
-                                    be a number, linear=START[:END[:STEP=1]] or
-                                    exp=START[:END[:BASE=2]]. This option can be
-                                    used multiple times to set the sleep for the
-                                    different retry types. Eg: --retry-sleep
+    --retry-sleep [TYPE:]EXPR       An expression for the time to sleep between
+                                    retries in seconds (optionally) prefixed by
+                                    the type of retry (file_access, fragment,
+                                    http (default)) to apply the sleep to. EXPR
+                                    can be a number, linear=START[:END[:STEP=1]]
+                                    or exp=START[:END[:BASE=2]]. This option can
+                                    be used multiple times to set the sleep for
+                                    the different retry types. Eg: --retry-sleep
                                     linear=1::2 --retry-sleep fragment:exp=1:20
     --skip-unavailable-fragments    Skip unavailable fragments for DASH,
                                     hlsnative and ISM downloads (default)
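
The two EXPR forms are easier to see in code. A hypothetical helper (not yt-dlp's internal implementation), assuming the TYPE: prefix has already been stripped:

    def retry_sleep(expr, n):
        """Seconds to sleep before retry n (0-based) for 'linear=...' or 'exp=...'."""
        kind, _, args = expr.partition('=')
        start, end, step = (args.split(':') + ['', ''])[:3]
        value = float(start) + n * float(step or 1) if kind == 'linear' else float(start) * float(step or 2) ** n
        return min(value, float(end)) if end else value

    print([retry_sleep('linear=1::2', n) for n in range(3)])  # linear=1::2 -> [1.0, 3.0, 5.0]
    print([retry_sleep('exp=1:20', n) for n in range(6)])     # exp=1:20 -> [1.0, 2.0, 4.0, 8.0, 16.0, 20.0]
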
@@ -859,10 +859,10 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                     downloadable
     -F, --list-formats              List available formats of each video.
                                     Simulate unless --no-simulate is used
-    --merge-output-format FORMAT    Containers that may be used when merging
-                                    formats, separated by "/" (Eg: "mp4/mkv").
-                                    Ignored if no merge is required. (currently
-                                    supported: avi, flv, mkv, mov, mp4, webm)
+    --merge-output-format FORMAT    If a merge is required (e.g.
+                                    bestvideo+bestaudio), output to given
+                                    container format. One of mkv, mp4, ogg,
+                                    webm, flv. Ignored if no merge is required

 ## Subtitle Options:
     --write-subs                    Write subtitle file
@@ -916,7 +916,7 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                     (requires ffmpeg and ffprobe)
     --audio-format FORMAT           Format to convert the audio to when -x is
                                     used. (currently supported: best (default),
-                                    aac, alac, flac, m4a, mp3, opus, vorbis,
+                                    mp3, aac, m4a, opus, vorbis, flac, alac,
                                     wav). You can specify multiple rules using
                                     similar syntax as --remux-video
     --audio-quality QUALITY         Specify ffmpeg audio quality to use when
@@ -924,9 +924,9 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                     between 0 (best) and 10 (worst) for VBR or a
                                     specific bitrate like 128K (default 5)
     --remux-video FORMAT            Remux the video into another container if
-                                    necessary (currently supported: avi, flv,
-                                    mkv, mov, mp4, webm, aac, aiff, alac, flac,
-                                    m4a, mka, mp3, ogg, opus, vorbis, wav). If
+                                    necessary (currently supported: mp4, mkv,
+                                    flv, webm, mov, avi, mka, ogg, mp3, aac,
+                                    m4a, opus, vorbis, flac, alac, wav). If
                                     target container does not support the
                                     video/audio codec, remuxing will fail. You
                                     can specify multiple rules; Eg.
@@ -1025,7 +1025,7 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                     be used multiple times
     --no-exec                       Remove any previously defined --exec
     --convert-subs FORMAT           Convert the subtitles to another format
-                                    (currently supported: ass, lrc, srt, vtt)
+                                    (currently supported: srt, vtt, ass, lrc)
                                     (Alias: --convert-subtitles)
     --convert-thumbnails FORMAT     Convert the thumbnails to another format
                                     (currently supported: jpg, png, webp). You
@@ -1257,7 +1257,7 @@ The available fields are:
 - `average_rating` (numeric): Average rating give by users, the scale used depends on the webpage
 - `comment_count` (numeric): Number of comments on the video (For some extractors, comments are only downloaded at the end, and so this field cannot be used)
 - `age_limit` (numeric): Age restriction for the video (years)
-- `live_status` (string): One of "not_live", "is_live", "is_upcoming", "was_live", "post_live" (was live, but VOD is not yet processed)
+- `live_status` (string): One of "is_live", "was_live", "is_upcoming", "not_live"
 - `is_live` (boolean): Whether this video is a live stream or a fixed-length video
 - `was_live` (boolean): Whether this video was originally a live stream
 - `playable_in_embed` (string): Whether this video is allowed to play in embedded players on other sites
|
|||||||
- `vbr` (numeric): Average video bitrate in KBit/s
|
- `vbr` (numeric): Average video bitrate in KBit/s
|
||||||
- `fps` (numeric): Frame rate
|
- `fps` (numeric): Frame rate
|
||||||
- `dynamic_range` (string): The dynamic range of the video
|
- `dynamic_range` (string): The dynamic range of the video
|
||||||
- `audio_channels` (numeric): The number of audio channels
|
|
||||||
- `stretched_ratio` (float): `width:height` of the video's pixels, if not square
|
- `stretched_ratio` (float): `width:height` of the video's pixels, if not square
|
||||||
- `vcodec` (string): Name of the video codec in use
|
- `vcodec` (string): Name of the video codec in use
|
||||||
- `container` (string): Name of the container format
|
- `container` (string): Name of the container format
|
||||||
@@ -1444,7 +1443,7 @@ You can also use special names to select particular edge case formats:

 - `all`: Select **all formats** separately
 - `mergeall`: Select and **merge all formats** (Must be used with `--audio-multistreams`, `--video-multistreams` or both)
-- `b*`, `best*`: Select the best quality format that **contains either** a video or an audio or both (ie; `vcodec!=none or acodec!=none`)
+- `b*`, `best*`: Select the best quality format that **contains either** a video or an audio
 - `b`, `best`: Select the best quality format that **contains both** video and audio. Equivalent to `best*[vcodec!=none][acodec!=none]`
 - `bv`, `bestvideo`: Select the best quality **video-only** format. Equivalent to `best*[acodec=none]`
 - `bv*`, `bestvideo*`: Select the best quality format that **contains video**. It may also contain audio. Equivalent to `best*[vcodec!=none]`
@@ -1457,7 +1456,7 @@ You can also use special names to select particular edge case formats:
 - `wa`, `worstaudio`: Select the worst quality audio-only format. Equivalent to `worst*[vcodec=none]`
 - `wa*`, `worstaudio*`: Select the worst quality format that contains audio. It may also contain video. Equivalent to `worst*[acodec!=none]`

-For example, to download the worst quality video-only format you can use `-f worstvideo`. It is however recommended not to use `worst` and related options. When your format selector is `worst`, the format which is worst in all respects is selected. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-S +size` or more rigorously, `-S +size,+br,+res,+fps` instead of `-f worst`. See [Sorting Formats](#sorting-formats) for more details.
+For example, to download the worst quality video-only format you can use `-f worstvideo`. It is however recommended not to use `worst` and related options. When your format selector is `worst`, the format which is worst in all respects is selected. Most of the time, what you actually want is the video with the smallest filesize instead. So it is generally better to use `-S +size` or more rigorously, `-S +size,+br,+res,+fps` instead of `-f worst`. See [sorting formats](#sorting-formats) for more details.

 You can select the n'th best format of a type by using `best<type>.<n>`. For example, `best.2` will select the 2nd best combined format. Similarly, `bv*.3` will select the 3rd best format that contains a video stream.

@@ -1531,7 +1530,6 @@ The available fields are:
 - `res`: Video resolution, calculated as the smallest dimension.
 - `fps`: Framerate of video
 - `hdr`: The dynamic range of the video (`DV` > `HDR12` > `HDR10+` > `HDR10` > `HLG` > `SDR`)
-- `channels`: The number of audio channels
 - `tbr`: Total average bitrate in KBit/s
 - `vbr`: Average video bitrate in KBit/s
 - `abr`: Average audio bitrate in KBit/s
@@ -1777,7 +1775,7 @@ The following extractors use this feature:

 #### crunchyrollbeta
 * `format`: Which stream type(s) to extract. Default is `adaptive_hls` Eg: `crunchyrollbeta:format=vo_adaptive_hls`
-    * Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `download_dash`, `multitrack_adaptive_hls_v2`
+    * Potentially useful values include `adaptive_hls`, `adaptive_dash`, `vo_adaptive_hls`, `vo_adaptive_dash`, `download_hls`, `trailer_hls`, `trailer_dash`
 * `hardsub`: Preference order for which hardsub versions to extract. Default is `None` (no hardsubs). Eg: `crunchyrollbeta:hardsub=en-US,None`

 #### vikichannel
@@ -2134,7 +2132,6 @@ These options may no longer work as intended
     --no-include-ads                 Default
     --write-annotations              No supported site has annotations now
     --no-write-annotations           Default
-    --compat-options seperate-video-versions    No longer needed

 #### Removed
 These options were deprecated since 2014 and have now been entirely removed
devscripts/lazy_load_template.py
@@ -9,13 +9,11 @@ from ..utils import (
     write_string,
 )

-# These bloat the lazy_extractors, so allow them to passthrough silently
-ALLOWED_CLASSMETHODS = {'get_testcases', 'extract_from_webpage'}
-

 class LazyLoadMetaClass(type):
     def __getattr__(cls, name):
-        if '_real_class' not in cls.__dict__ and name not in ALLOWED_CLASSMETHODS:
+        # "_TESTS" bloat the lazy_extractors
+        if '_real_class' not in cls.__dict__ and name != 'get_testcases':
             write_string(
                 'WARNING: Falling back to normal extractor since lazy extractor '
                 f'{cls.__name__} does not have attribute {name}{bug_reports_message()}\n')
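
Both sides rely on the same metaclass trick: an attribute lookup that the generated stub cannot satisfy falls back to the real extractor class; the change only widens the set of classmethods allowed to pass through silently. A simplified, self-contained sketch of the pattern (all names here are stand-ins, not yt-dlp's API):

    class LazyMeta(type):
        def __getattr__(cls, name):
            # First lookup that misses on the stub resolves the real class
            if '_real_class' not in cls.__dict__:
                cls._real_class = cls._load()
            return getattr(cls._real_class, name)

    class RealIE:
        IE_DESC = 'the real extractor'

    class LazyIE(metaclass=LazyMeta):
        _load = staticmethod(lambda: RealIE)  # stand-in for the real import

    print(LazyIE.IE_DESC)  # -> 'the real extractor'
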
devscripts/make_issue_template.py
@@ -7,14 +7,20 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


+import optparse
 import re

-from devscripts.utils import (
-    get_filename_args,
-    read_file,
-    read_version,
-    write_file,
-)
+
+def read(fname):
+    with open(fname, encoding='utf-8') as f:
+        return f.read()
+
+
+# Get the version without importing the package
+def read_version(fname):
+    exec(compile(read(fname), fname, 'exec'))
+    return locals()['__version__']
+

 VERBOSE_TMPL = '''
 - type: checkboxes
@@ -52,24 +58,20 @@ VERBOSE_TMPL = '''
         required: true
 '''.strip()

-NO_SKIP = '''
-- type: checkboxes
-  attributes:
-    label: DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
-    description: Fill all fields even if you think it is irrelevant for the issue
-    options:
-      - label: I understand that I will be **blocked** if I remove or skip any mandatory\\* field
-        required: true
-'''.strip()
-

 def main():
-    fields = {'version': read_version(), 'no_skip': NO_SKIP}
+    parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
+    _, args = parser.parse_args()
+    if len(args) != 2:
+        parser.error('Expected an input and an output filename')
+
+    fields = {'version': read_version('yt_dlp/version.py')}
     fields['verbose'] = VERBOSE_TMPL % fields
     fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])

-    infile, outfile = get_filename_args(has_infile=True)
-    write_file(outfile, read_file(infile) % fields)
+    infile, outfile = args
+    with open(outfile, 'w', encoding='utf-8') as outf:
+        outf.write(read(infile) % fields)


 if __name__ == '__main__':
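
Both versions fill the issue templates the same way: the template text carries printf-style %(name)s fields substituted from the fields dict, and verbose_optional is derived by stripping the "required: true" constraints with the regex shown above. A toy run with made-up values:

    import re

    fields = {'version': '2022.07.18'}
    fields['verbose'] = 'version %(version)s\n    required: true' % fields
    fields['verbose_optional'] = re.sub(r'(\n\s+validations:)?\n\s+required: true', '', fields['verbose'])
    print(fields['verbose_optional'])  # -> version 2022.07.18
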
devscripts/make_lazy_extractors.py
@@ -7,12 +7,11 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


+import optparse
 from inspect import getsource

-from devscripts.utils import get_filename_args, read_file, write_file
-
 NO_ATTR = object()
-STATIC_CLASS_PROPERTIES = ['IE_NAME', 'IE_DESC', 'SEARCH_KEY', '_VALID_URL', '_WORKING', '_NETRC_MACHINE', 'age_limit']
+STATIC_CLASS_PROPERTIES = ['IE_NAME', 'IE_DESC', 'SEARCH_KEY', '_WORKING', '_NETRC_MACHINE', 'age_limit']
 CLASS_METHODS = [
     'ie_key', 'working', 'description', 'suitable', '_match_valid_url', '_match_id', 'get_temp_id', 'is_suitable'
 ]
@@ -20,11 +19,17 @@ IE_TEMPLATE = '''
 class {name}({bases}):
     _module = {module!r}
 '''
-MODULE_TEMPLATE = read_file('devscripts/lazy_load_template.py')
+with open('devscripts/lazy_load_template.py', encoding='utf-8') as f:
+    MODULE_TEMPLATE = f.read()


 def main():
-    lazy_extractors_filename = get_filename_args(default_outfile='yt_dlp/extractor/lazy_extractors.py')
+    parser = optparse.OptionParser(usage='%prog [OUTFILE.py]')
+    args = parser.parse_args()[1] or ['yt_dlp/extractor/lazy_extractors.py']
+    if len(args) != 1:
+        parser.error('Expected only an output filename')
+
+    lazy_extractors_filename = args[0]
     if os.path.exists(lazy_extractors_filename):
         os.remove(lazy_extractors_filename)

@@ -41,7 +46,8 @@ def main():
         *build_ies(_ALL_CLASSES, (InfoExtractor, SearchInfoExtractor), DummyInfoExtractor),
     ))

-    write_file(lazy_extractors_filename, f'{module_src}\n')
+    with open(lazy_extractors_filename, 'wt', encoding='utf-8') as f:
+        f.write(f'{module_src}\n')


 def get_all_ies():
@@ -110,6 +116,11 @@ def build_lazy_ie(ie, name, attr_base):
     }.get(base.__name__, base.__name__) for base in ie.__bases__)

     s = IE_TEMPLATE.format(name=name, module=ie.__module__, bases=bases)
+    valid_url = getattr(ie, '_VALID_URL', None)
+    if not valid_url and hasattr(ie, '_make_valid_url'):
+        valid_url = ie._make_valid_url()
+    if valid_url:
+        s += f'    _VALID_URL = {valid_url!r}\n'
     return s + '\n'.join(extra_ie_code(ie, attr_base))
devscripts/make_readme.py
@@ -5,17 +5,10 @@ yt-dlp --help | make_readme.py
 This must be run in a console of correct width
 """

-# Allow direct execution
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-
 import functools
 import re
+import sys
-from devscripts.utils import read_file, write_file

 README_FILE = 'README.md'

@@ -45,10 +38,6 @@ switch_col_width = len(re.search(r'(?m)^\s{5,}', options).group())
 delim = f'\n{" " * switch_col_width}'

 PATCHES = (
-    (  # Standardize update message
-        r'(?m)^( -U, --update\s+).+(\n \s.+)*$',
-        r'\1Update this program to the latest version',
-    ),
     (  # Headings
         r'(?m)^ (\w.+\n)( (?=\w))?',
         r'## \1'
@@ -74,9 +63,11 @@ PATCHES = (
     ),
 )

-readme = read_file(README_FILE)
+with open(README_FILE, encoding='utf-8') as f:
+    readme = f.read()

-write_file(README_FILE, ''.join((
+with open(README_FILE, 'w', encoding='utf-8') as f:
+    f.write(''.join((
     take_section(readme, end=f'## {OPTIONS_START}'),
     functools.reduce(apply_patch, PATCHES, options),
     take_section(readme, f'# {OPTIONS_END}'),
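
PATCHES drives a fold: functools.reduce applies each (pattern, replacement) pair to the options text in turn. The apply_patch below is a plausible simplification for illustration, not necessarily the script's exact definition:

    import functools
    import re

    def apply_patch(text, patch):
        return re.sub(*patch, text)

    PATCHES = ((r'(?m)^ (\w.+)$', r'## \1'),)  # a simplified "Headings" patch
    print(functools.reduce(apply_patch, PATCHES, ' General Options:'))
    # -> '## General Options:'
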
devscripts/make_supportedsites.py
@@ -7,13 +7,21 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


-from devscripts.utils import get_filename_args, write_file
+import optparse
+
 from yt_dlp.extractor import list_extractor_classes


 def main():
+    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
+    _, args = parser.parse_args()
+    if len(args) != 1:
+        parser.error('Expected an output filename')
+
     out = '\n'.join(ie.description() for ie in list_extractor_classes() if ie.IE_DESC is not False)
-    write_file(get_filename_args(), f'# Supported sites\n{out}\n')
+
+    with open(args[0], 'w', encoding='utf-8') as outf:
+        outf.write(f'# Supported sites\n{out}\n')


 if __name__ == '__main__':
devscripts/prepare_manpage.py
@@ -1,22 +1,9 @@
 #!/usr/bin/env python3

-# Allow direct execution
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-
+import optparse
 import os.path
 import re

-from devscripts.utils import (
-    compose_functions,
-    get_filename_args,
-    read_file,
-    write_file,
-)

 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 README_FILE = os.path.join(ROOT_DIR, 'README.md')

@@ -35,6 +22,25 @@ yt\-dlp \- A youtube-dl fork with additional features and patches
 '''


+def main():
+    parser = optparse.OptionParser(usage='%prog OUTFILE.md')
+    _, args = parser.parse_args()
+    if len(args) != 1:
+        parser.error('Expected an output filename')
+
+    outfile, = args
+
+    with open(README_FILE, encoding='utf-8') as f:
+        readme = f.read()
+
+    readme = filter_excluded_sections(readme)
+    readme = move_sections(readme)
+    readme = filter_options(readme)
+
+    with open(outfile, 'w', encoding='utf-8') as outf:
+        outf.write(PREFIX + readme)
+
+
 def filter_excluded_sections(readme):
     EXCLUDED_SECTION_BEGIN_STRING = re.escape('<!-- MANPAGE: BEGIN EXCLUDED SECTION -->')
     EXCLUDED_SECTION_END_STRING = re.escape('<!-- MANPAGE: END EXCLUDED SECTION -->')
@@ -86,12 +92,5 @@ def filter_options(readme):
     return readme.replace(section, options, 1)


-TRANSFORM = compose_functions(filter_excluded_sections, move_sections, filter_options)
-
-
-def main():
-    write_file(get_filename_args(), PREFIX + TRANSFORM(read_file(README_FILE)))
-
-
 if __name__ == '__main__':
     main()
devscripts/set-variant.py
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-
-# Allow direct execution
-import os
-import sys
-
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-
-import argparse
-import functools
-import re
-
-from devscripts.utils import compose_functions, read_file, write_file
-
-VERSION_FILE = 'yt_dlp/version.py'
-
-
-def parse_options():
-    parser = argparse.ArgumentParser(description='Set the build variant of the package')
-    parser.add_argument('variant', help='Name of the variant')
-    parser.add_argument('-M', '--update-message', default=None, help='Message to show in -U')
-    return parser.parse_args()
-
-
-def property_setter(name, value):
-    return functools.partial(re.sub, rf'(?m)^{name}\s*=\s*.+$', f'{name} = {value!r}')
-
-
-opts = parse_options()
-transform = compose_functions(
-    property_setter('VARIANT', opts.variant),
-    property_setter('UPDATE_HINT', opts.update_message)
-)
-
-write_file(VERSION_FILE, transform(read_file(VERSION_FILE)))
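
The core of this removed script is property_setter, which builds a regex substitution that rewrites one module-level assignment. In isolation:

    import functools
    import re

    def property_setter(name, value):
        return functools.partial(re.sub, rf'(?m)^{name}\s*=\s*.+$', f'{name} = {value!r}')

    src = 'VARIANT = None\nUPDATE_HINT = None\n'
    print(property_setter('VARIANT', 'pip')(src))
    # VARIANT = 'pip'
    # UPDATE_HINT = None
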
devscripts/update-formulae.py
@@ -1,10 +1,5 @@
 #!/usr/bin/env python3
-
-"""
-Usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
-version can be either 0-aligned (yt-dlp version) or normalized (PyPi version)
-"""
-
 # Allow direct execution
 import os
 import sys
@@ -16,7 +11,8 @@ import json
 import re
 import urllib.request

-from devscripts.utils import read_file, write_file
+# usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
+# version can be either 0-aligned (yt-dlp version) or normalized (PyPl version)

 filename, version = sys.argv[1:]

@@ -31,9 +27,11 @@ tarball_file = next(x for x in pypi_release['urls'] if x['filename'].endswith('.
 sha256sum = tarball_file['digests']['sha256']
 url = tarball_file['url']

-formulae_text = read_file(filename)
+with open(filename) as r:
+    formulae_text = r.read()

 formulae_text = re.sub(r'sha256 "[0-9a-f]*?"', 'sha256 "%s"' % sha256sum, formulae_text, count=1)
 formulae_text = re.sub(r'url "[^"]*?"', 'url "%s"' % url, formulae_text, count=1)

-write_file(filename, formulae_text)
+with open(filename, 'w') as w:
+    w.write(formulae_text)
devscripts/update-version.py
@@ -7,35 +7,32 @@ import sys
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
-import contextlib
 import subprocess
 import sys
 from datetime import datetime
 
-from devscripts.utils import read_version, write_file
+with open('yt_dlp/version.py') as f:
+    exec(compile(f.read(), 'yt_dlp/version.py', 'exec'))
+    old_version = locals()['__version__']
 
+old_version_list = old_version.split('.')
 
-def get_new_version(revision):
-    version = datetime.utcnow().strftime('%Y.%m.%d')
+old_ver = '.'.join(old_version_list[:3])
+old_rev = old_version_list[3] if len(old_version_list) > 3 else ''
 
-    if revision:
-        assert revision.isdigit(), 'Revision must be a number'
-    else:
-        old_version = read_version().split('.')
-        if version.split('.') == old_version[:3]:
-            revision = str(int((old_version + [0])[3]) + 1)
+ver = datetime.utcnow().strftime("%Y.%m.%d")
 
-    return f'{version}.{revision}' if revision else version
+rev = (sys.argv[1:] or [''])[0]  # Use first argument, if present as revision number
+if not rev:
+    rev = str(int(old_rev or 0) + 1) if old_ver == ver else ''
 
+VERSION = '.'.join((ver, rev)) if rev else ver
 
-def get_git_head():
-    with contextlib.suppress(Exception):
-        sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
-        return sp.communicate()[0].decode().strip() or None
-
-
-VERSION = get_new_version((sys.argv + [''])[1])
-GIT_HEAD = get_git_head()
+try:
+    sp = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=subprocess.PIPE)
+    GIT_HEAD = sp.communicate()[0].decode().strip() or None
+except Exception:
+    GIT_HEAD = None
 
 VERSION_FILE = f'''\
 # Autogenerated by devscripts/update-version.py
@@ -43,12 +40,10 @@ VERSION_FILE = f'''\
 __version__ = {VERSION!r}
 
 RELEASE_GIT_HEAD = {GIT_HEAD!r}
-
-VARIANT = None
-
-UPDATE_HINT = None
 '''
 
-write_file('yt_dlp/version.py', VERSION_FILE)
-print(f'::set-output name=ytdlp_version::{VERSION}')
+with open('yt_dlp/version.py', 'wt') as f:
+    f.write(VERSION_FILE)
+
+print('::set-output name=ytdlp_version::' + VERSION)
 print(f'\nVersion = {VERSION}, Git HEAD = {GIT_HEAD}')

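
Both versions of update-version.py implement the same scheme: the version is the UTC date, with a numeric fourth component added when more than one release happens on the same day. A condensed sketch (next_version is a name invented here, adapted from get_new_version on the removed side):

    from datetime import datetime

    def next_version(old_version, revision=''):
        version = datetime.utcnow().strftime('%Y.%m.%d')
        old = old_version.split('.')
        if not revision and version.split('.') == old[:3]:
            # Releasing again on the same calendar day: bump the revision
            revision = str(int((old + [0])[3]) + 1)
        return f'{version}.{revision}' if revision else version

    print(next_version('2022.08.08'))  # '2022.08.08.1' if run on that same day
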
devscripts/utils.py
@@ -1,35 +0,0 @@
-import argparse
-import functools
-
-
-def read_file(fname):
-    with open(fname, encoding='utf-8') as f:
-        return f.read()
-
-
-def write_file(fname, content):
-    with open(fname, 'w', encoding='utf-8') as f:
-        return f.write(content)
-
-
-# Get the version without importing the package
-def read_version(fname='yt_dlp/version.py'):
-    exec(compile(read_file(fname), fname, 'exec'))
-    return locals()['__version__']
-
-
-def get_filename_args(has_infile=False, default_outfile=None):
-    parser = argparse.ArgumentParser()
-    if has_infile:
-        parser.add_argument('infile', help='Input file')
-    kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
-    parser.add_argument('outfile', **kwargs, help='Output file')
-
-    opts = parser.parse_args()
-    if has_infile:
-        return opts.infile, opts.outfile
-    return opts.outfile
-
-
-def compose_functions(*functions):
-    return lambda x: functools.reduce(lambda y, f: f(y), functions, x)

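
The removed read_version helper exists so build scripts can learn the version without importing yt_dlp (and hence its dependencies). An equivalent sketch using an explicit namespace instead of locals(), which makes the mechanism easier to see (this variant is not the repository's code):

    def read_version(fname='yt_dlp/version.py'):
        ns = {}
        with open(fname, encoding='utf-8') as f:
            # Run version.py in an isolated namespace and read __version__ back out
            exec(compile(f.read(), fname, 'exec'), ns)
        return ns['__version__']
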
18
pyinst.py
@@ -1,17 +1,11 @@
 #!/usr/bin/env python3
 
-# Allow direct execution
 import os
+import platform
 import sys
 
-sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
-
-import platform
-
 from PyInstaller.__main__ import run as run_pyinstaller
 
-from devscripts.utils import read_version
-
 OS_NAME, MACHINE, ARCH = sys.platform, platform.machine(), platform.architecture()[0][:2]
 if MACHINE in ('x86_64', 'AMD64') or ('i' in MACHINE and '86' in MACHINE):
     # NB: Windows x86 has MACHINE = AMD64 irrespective of bitness
@@ -19,7 +13,8 @@ if MACHINE in ('x86_64', 'AMD64') or ('i' in MACHINE and '86' in MACHINE):
 
 
 def main():
-    opts, version = parse_options(), read_version()
+    opts = parse_options()
+    version = read_version('yt_dlp/version.py')
 
     onedir = '--onedir' in opts or '-D' in opts
     if not onedir and '-F' not in opts and '--onefile' not in opts:
@@ -58,6 +53,13 @@ def parse_options():
     return opts
 
 
+# Get the version from yt_dlp/version.py without importing the package
+def read_version(fname):
+    with open(fname, encoding='utf-8') as f:
+        exec(compile(f.read(), fname, 'exec'))
+        return locals()['__version__']
+
+
 def exe(onedir):
     """@returns (name, path)"""
     name = '_'.join(filter(None, (

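
pyinst.py derives its platform constants the same way on both sides. How those values look in practice (the artifact-name join is only sketched here; the real suffix rules live in exe(), which this diff shows truncated):

    import platform
    import sys

    OS_NAME, MACHINE, ARCH = sys.platform, platform.machine(), platform.architecture()[0][:2]
    # e.g. ('linux', 'x86_64', '64') or ('win32', 'AMD64', '32')
    print('_'.join(filter(None, ('yt-dlp', OS_NAME, MACHINE, ARCH))))
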
setup.cfg
@@ -31,7 +31,7 @@ setenv =
 
 
 [isort]
-py_version = 37
+py_version = 36
 multi_line_output = VERTICAL_HANGING_INDENT
 line_length = 80
 reverse_relative = true

23
setup.py
@@ -12,18 +12,28 @@ except ImportError:
     from distutils.core import Command, setup
     setuptools_available = False
 
-from devscripts.utils import read_file, read_version
 
-VERSION = read_version()
+def read(fname):
+    with open(fname, encoding='utf-8') as f:
+        return f.read()
+
+
+# Get the version from yt_dlp/version.py without importing the package
+def read_version(fname):
+    exec(compile(read(fname), fname, 'exec'))
+    return locals()['__version__']
+
+
+VERSION = read_version('yt_dlp/version.py')
 
 DESCRIPTION = 'A youtube-dl fork with additional features and patches'
 
 LONG_DESCRIPTION = '\n\n'.join((
     'Official repository: <https://github.com/yt-dlp/yt-dlp>',
     '**PS**: Some links in this document will not work since this is a copy of the README.md from Github',
-    read_file('README.md')))
+    read('README.md')))
 
-REQUIREMENTS = read_file('requirements.txt').splitlines()
+REQUIREMENTS = read('requirements.txt').splitlines()
 
 
 def packages():
@@ -111,7 +121,7 @@ class build_lazy_extractors(Command):
         if self.dry_run:
             print('Skipping build of lazy extractors in dry run mode')
             return
-        subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])
+        subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'])
 
 
 params = py2exe_params() if sys.argv[1:2] == ['py2exe'] else build_params()
@@ -126,7 +136,7 @@ setup(
     url='https://github.com/yt-dlp/yt-dlp',
     packages=packages(),
     install_requires=REQUIREMENTS,
-    python_requires='>=3.7',
+    python_requires='>=3.6',
     project_urls={
         'Documentation': 'https://github.com/yt-dlp/yt-dlp#readme',
         'Source': 'https://github.com/yt-dlp/yt-dlp',
@@ -138,6 +148,7 @@ setup(
         'Development Status :: 5 - Production/Stable',
         'Environment :: Console',
         'Programming Language :: Python',
+        'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',

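
The build_lazy_extractors change is only about who chooses the output path; both sides shell out to the same devscript. The two invocations, side by side:

    import subprocess
    import sys

    # f1e2d4a9a2: the devscript falls back to its own default output path
    subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py'])

    # 0b5583b112: the output path is passed explicitly
    subprocess.run([sys.executable, 'devscripts/make_lazy_extractors.py', 'yt_dlp/extractor/lazy_extractors.py'])
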
supportedsites.md
@@ -18,7 +18,7 @@
  - **8tracks**
  - **91porn**
  - **9c9media**
- - **9gag**: 9GAG
+ - **9gag**
  - **9now.com.au**
  - **abc.net.au**
  - **abc.net.au:iview**
@@ -64,7 +64,6 @@
  - **AmericasTestKitchenSeason**
  - **AmHistoryChannel**
  - **anderetijden**: npo.nl, ntr.nl, omroepwnl.nl, zapp.nl and npo3.nl
- - **Angel**
  - **AnimalPlanet**
  - **AnimeOnDemand**: [<abbr title="netrc machine"><em>animeondemand</em></abbr>]
  - **ant1newsgr:article**: ant1news.gr articles
@@ -188,7 +187,6 @@
  - **Camdemy**
  - **CamdemyFolder**
  - **CamModels**
- - **CamtasiaEmbed**
  - **CamWithHer**
  - **CanalAlpha**
  - **canalc2.tv**
@@ -234,7 +232,6 @@
  - **Clippit**
  - **ClipRs**
  - **Clipsyndicate**
- - **ClipYouEmbed**
  - **CloserToTruth**
  - **CloudflareStream**
  - **Cloudy**
@@ -476,7 +473,6 @@
  - **gronkh:feed**
  - **gronkh:vods**
  - **Groupon**
- - **Harpodeon**
  - **hbo**
  - **HearThisAt**
  - **Heise**
@@ -495,7 +491,6 @@
  - **hitbox:live**
  - **HitRecord**
  - **hketv**: 香港教育局教育電視 (HKETV) Educational Television, Hong Kong Educational Bureau
- - **Holodex**
  - **HotNewHipHop**
  - **hotstar**
  - **hotstar:playlist**
@@ -507,7 +502,6 @@
  - **HRTiPlaylist**: [<abbr title="netrc machine"><em>hrti</em></abbr>]
  - **HSEProduct**
  - **HSEShow**
- - **html5**
  - **Huajiao**: 花椒直播
  - **HuffPost**: Huffington Post
  - **Hungama**
@@ -579,7 +573,6 @@
  - **KickStarter**
  - **KinjaEmbed**
  - **KinoPoisk**
- - **KompasVideo**
  - **KonserthusetPlay**
  - **Koo**
  - **KrasView**: Красвью
@@ -722,7 +715,6 @@
  - **Motherless**
  - **MotherlessGroup**
  - **Motorsport**: motorsport.com
- - **MotorTrend**
  - **MovieClips**
  - **MovieFap**
  - **Moviepilot**
@@ -898,10 +890,21 @@
  - **openrec:capture**
  - **openrec:movie**
  - **OraTV**
+ - **orf:burgenland**: Radio Burgenland
+ - **orf:fm4**: radio FM4
  - **orf:fm4:story**: fm4.orf.at stories
  - **orf:iptv**: iptv.ORF.at
- - **orf:radio**
+ - **orf:kaernten**: Radio Kärnten
+ - **orf:noe**: Radio Niederösterreich
+ - **orf:oberoesterreich**: Radio Oberösterreich
+ - **orf:oe1**: Radio Österreich 1
+ - **orf:oe3**: Radio Österreich 3
+ - **orf:salzburg**: Radio Salzburg
+ - **orf:steiermark**: Radio Steiermark
+ - **orf:tirol**: Radio Tirol
  - **orf:tvthek**: ORF TVthek
+ - **orf:vorarlberg**: Radio Vorarlberg
+ - **orf:wien**: Radio Wien
  - **OsnatelTV**: [<abbr title="netrc machine"><em>osnateltv</em></abbr>]
  - **OutsideTV**
  - **PacktPub**: [<abbr title="netrc machine"><em>packtpub</em></abbr>]
@@ -919,7 +922,7 @@
  - **parliamentlive.tv**: UK parliament videos
  - **Parlview**
  - **Patreon**
- - **PatreonCampaign**
+ - **PatreonUser**
  - **pbs**: Public Broadcasting Service (PBS) and member stations: PBS: Public Broadcasting Service, APT - Alabama Public Television (WBIQ), GPB/Georgia Public Broadcasting (WGTV), Mississippi Public Broadcasting (WMPN), Nashville Public Television (WNPT), WFSU-TV (WFSU), WSRE (WSRE), WTCI (WTCI), WPBA/Channel 30 (WPBA), Alaska Public Media (KAKM), Arizona PBS (KAET), KNME-TV/Channel 5 (KNME), Vegas PBS (KLVX), AETN/ARKANSAS ETV NETWORK (KETS), KET (WKLE), WKNO/Channel 10 (WKNO), LPB/LOUISIANA PUBLIC BROADCASTING (WLPB), OETA (KETA), Ozarks Public Television (KOZK), WSIU Public Broadcasting (WSIU), KEET TV (KEET), KIXE/Channel 9 (KIXE), KPBS San Diego (KPBS), KQED (KQED), KVIE Public Television (KVIE), PBS SoCal/KOCE (KOCE), ValleyPBS (KVPT), CONNECTICUT PUBLIC TELEVISION (WEDH), KNPB Channel 5 (KNPB), SOPTV (KSYS), Rocky Mountain PBS (KRMA), KENW-TV3 (KENW), KUED Channel 7 (KUED), Wyoming PBS (KCWC), Colorado Public Television / KBDI 12 (KBDI), KBYU-TV (KBYU), Thirteen/WNET New York (WNET), WGBH/Channel 2 (WGBH), WGBY (WGBY), NJTV Public Media NJ (WNJT), WLIW21 (WLIW), mpt/Maryland Public Television (WMPB), WETA Television and Radio (WETA), WHYY (WHYY), PBS 39 (WLVT), WVPT - Your Source for PBS and More! (WVPT), Howard University Television (WHUT), WEDU PBS (WEDU), WGCU Public Media (WGCU), WPBT2 (WPBT), WUCF TV (WUCF), WUFT/Channel 5 (WUFT), WXEL/Channel 42 (WXEL), WLRN/Channel 17 (WLRN), WUSF Public Broadcasting (WUSF), ETV (WRLK), UNC-TV (WUNC), PBS Hawaii - Oceanic Cable Channel 10 (KHET), Idaho Public Television (KAID), KSPS (KSPS), OPB (KOPB), KWSU/Channel 10 & KTNW/Channel 31 (KWSU), WILL-TV (WILL), Network Knowledge - WSEC/Springfield (WSEC), WTTW11 (WTTW), Iowa Public Television/IPTV (KDIN), Nine Network (KETC), PBS39 Fort Wayne (WFWA), WFYI Indianapolis (WFYI), Milwaukee Public Television (WMVS), WNIN (WNIN), WNIT Public Television (WNIT), WPT (WPNE), WVUT/Channel 22 (WVUT), WEIU/Channel 51 (WEIU), WQPT-TV (WQPT), WYCC PBS Chicago (WYCC), WIPB-TV (WIPB), WTIU (WTIU), CET (WCET), ThinkTVNetwork (WPTD), WBGU-TV (WBGU), WGVU TV (WGVU), NET1 (KUON), Pioneer Public Television (KWCM), SDPB Television (KUSD), TPT (KTCA), KSMQ (KSMQ), KPTS/Channel 8 (KPTS), KTWU/Channel 11 (KTWU), East Tennessee PBS (WSJK), WCTE-TV (WCTE), WLJT, Channel 11 (WLJT), WOSU TV (WOSU), WOUB/WOUC (WOUB), WVPB (WVPB), WKYU-PBS (WKYU), KERA 13 (KERA), MPBN (WCBB), Mountain Lake PBS (WCFE), NHPTV (WENH), Vermont PBS (WETK), witf (WITF), WQED Multimedia (WQED), WMHT Educational Telecommunications (WMHT), Q-TV (WDCQ), WTVS Detroit Public TV (WTVS), CMU Public Television (WCMU), WKAR-TV (WKAR), WNMU-TV Public TV 13 (WNMU), WDSE - WRPT (WDSE), WGTE TV (WGTE), Lakeland Public Television (KAWE), KMOS-TV - Channels 6.1, 6.2 and 6.3 (KMOS), MontanaPBS (KUSM), KRWG/Channel 22 (KRWG), KACV (KACV), KCOS/Channel 13 (KCOS), WCNY/Channel 24 (WCNY), WNED (WNED), WPBS (WPBS), WSKG Public TV (WSKG), WXXI (WXXI), WPSU (WPSU), WVIA Public Media Studios (WVIA), WTVI (WTVI), Western Reserve PBS (WNEO), WVIZ/PBS ideastream (WVIZ), KCTS 9 (KCTS), Basin PBS (KPBT), KUHT / Channel 8 (KUHT), KLRN (KLRN), KLRU (KLRU), WTJX Channel 12 (WTJX), WCVE PBS (WCVE), KBTC Public Television (KBTC)
  - **PearVideo**
  - **PeekVids**
@@ -1027,14 +1030,12 @@
  - **radlive:channel**
  - **radlive:season**
  - **Rai**
- - **RaiNews**
  - **RaiPlay**
  - **RaiPlayLive**
  - **RaiPlayPlaylist**
  - **RaiPlaySound**
  - **RaiPlaySoundLive**
  - **RaiPlaySoundPlaylist**
- - **RaiSudtirol**
  - **RayWenderlich**
  - **RayWenderlichCourse**
  - **RBMARadio**
@@ -1071,7 +1072,7 @@
  - **RoosterTeethSeries**: [<abbr title="netrc machine"><em>roosterteeth</em></abbr>]
  - **RottenTomatoes**
  - **Rozhlas**
- - **RTBF**: [<abbr title="netrc machine"><em>rtbf</em></abbr>]
+ - **RTBF**
  - **RTDocumentry**
  - **RTDocumentryPlaylist**
  - **rte**: Raidió Teilifís Éireann TV
@@ -1143,7 +1144,6 @@
  - **Shahid**: [<abbr title="netrc machine"><em>shahid</em></abbr>]
  - **ShahidShow**
  - **Shared**: shared.sx
- - **ShareVideosEmbed**
  - **ShemarooMe**
  - **ShowRoomLive**
  - **simplecast**
@@ -1268,7 +1268,6 @@
  - **TeleQuebecVideo**
  - **TeleTask**
  - **Telewebion**
- - **Tempo**
  - **TennisTV**: [<abbr title="netrc machine"><em>tennistv</em></abbr>]
  - **TenPlay**: [<abbr title="netrc machine"><em>10play</em></abbr>]
  - **TF1**

test/helper.py
@@ -92,13 +92,6 @@ def gettestcases(include_onlymatching=False):
         yield from ie.get_testcases(include_onlymatching)
 
 
-def getwebpagetestcases():
-    for ie in yt_dlp.extractor.gen_extractors():
-        for tc in ie.get_webpage_testcases():
-            tc.setdefault('add_ie', []).append('Generic')
-            yield tc
-
-
 md5 = lambda s: hashlib.md5(s.encode()).hexdigest()
 
 

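
The removed getwebpagetestcases() tags every webpage test case as going through the Generic extractor before yielding it. A runnable sketch with the extractor stubbed out:

    class FakeIE:
        @staticmethod
        def get_webpage_testcases():
            yield {'url': 'https://example.invalid/video'}

    def getwebpagetestcases(extractors):
        for ie in extractors:
            for tc in ie.get_webpage_testcases():
                tc.setdefault('add_ie', []).append('Generic')
                yield tc

    print(list(getwebpagetestcases([FakeIE()])))
    # [{'url': 'https://example.invalid/video', 'add_ie': ['Generic']}]
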
test/test_YoutubeDL.py
@@ -722,7 +722,7 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(id)s', '-abcd', info={'id': '-abcd'})
         test('%(id)s', '.abcd', info={'id': '.abcd'})
         test('%(id)s', 'ab__cd', info={'id': 'ab__cd'})
-        test('%(id)s', ('ab:cd', 'ab：cd'), info={'id': 'ab:cd'})
+        test('%(id)s', ('ab:cd', 'ab -cd'), info={'id': 'ab:cd'})
         test('%(id.0)s', '-', info={'id': '--'})
 
         # Invalid templates
@@ -770,7 +770,7 @@ class TestYoutubeDL(unittest.TestCase):
         test('a%(width|)d', 'a', outtmpl_na_placeholder='none')
 
         FORMATS = self.outtmpl_info['formats']
-        sanitize = lambda x: x.replace(':', '：').replace('"', '＂').replace('\n', ' ')
+        sanitize = lambda x: x.replace(':', ' -').replace('"', "'").replace('\n', ' ')
 
         # Custom type casting
         test('%(formats.:.id)l', 'id 1, id 2, id 3')
@@ -788,13 +788,13 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(filesize)#D', '1Ki')
         test('%(height)5.2D', ' 1.08k')
         test('%(title4)#S', 'foo_bar_test')
-        test('%(title4).10S', ('foo "bar" ', 'foo "bar"' + ('#' if compat_os_name == 'nt' else ' ')))
+        test('%(title4).10S', ('foo \'bar\' ', 'foo \'bar\'' + ('#' if compat_os_name == 'nt' else ' ')))
         if compat_os_name == 'nt':
-            test('%(title4)q', ('"foo \\"bar\\" test"', '＂foo ⧹"bar⧹" test＂'))
-            test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', '＂id 1＂ ＂id 2＂ ＂id 3＂'))
-            test('%(formats.0.id)#q', ('"id 1"', '＂id 1＂'))
+            test('%(title4)q', ('"foo \\"bar\\" test"', "'foo _'bar_' test'"))
+            test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', "'id 1' 'id 2' 'id 3'"))
+            test('%(formats.0.id)#q', ('"id 1"', "'id 1'"))
         else:
-            test('%(title4)q', ('\'foo "bar" test\'', '\'foo ＂bar＂ test\''))
+            test('%(title4)q', ('\'foo "bar" test\'', "'foo 'bar' test'"))
             test('%(formats.:.id)#q', "'id 1' 'id 2' 'id 3'")
             test('%(formats.0.id)#q', "'id 1'")
 
@@ -852,8 +852,8 @@ class TestYoutubeDL(unittest.TestCase):
         # Path expansion and escaping
         test('Hello %(title1)s', 'Hello $PATH')
         test('Hello %(title2)s', 'Hello %PATH%')
-        test('%(title3)s', ('foo/bar\\test', 'foo⧸bar⧹test'))
-        test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo⧸bar⧹test' % os.path.sep))
+        test('%(title3)s', ('foo/bar\\test', 'foo_bar_test'))
+        test('folder/%(title3)s', ('folder/foo/bar\\test', 'folder%sfoo_bar_test' % os.path.sep))
 
     def test_format_note(self):
         ydl = YoutubeDL()

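
These template tests differ because filename sanitization changed between the two commits: the newer side substitutes visually similar fullwidth characters for bytes NTFS rejects, the older side plain ASCII stand-ins. The mapping the tests above exercise:

    REPLACEMENTS = {
        'f1e2d4a9a2': {':': '\uff1a', '"': '\uff02', '/': '\u29f8', '\\': '\u29f9'},
        '0b5583b112': {':': ' -', '"': "'", '/': '_', '\\': '_'},
    }

    for commit, table in REPLACEMENTS.items():
        s = 'foo/bar\\test'
        for src, dst in table.items():
            s = s.replace(src, dst)
        print(commit, '->', s)  # foo⧸bar⧹test vs foo_bar_test
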
test/test_compat.py
@@ -28,8 +28,7 @@ class TestCompat(unittest.TestCase):
         with self.assertWarns(DeprecationWarning):
             compat.WINDOWS_VT_MODE
 
-        # TODO: Test submodule
-        # compat.asyncio.events  # Must not raise error
+        compat.asyncio.events  # Must not raise error
 
     def test_compat_expanduser(self):
         old_home = os.environ.get('HOME')

test/test_download.py
@@ -8,7 +8,6 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
-import collections
 import hashlib
 import http.client
 import json
@@ -21,7 +20,6 @@ from test.helper import (
     expect_warnings,
     get_params,
     gettestcases,
-    getwebpagetestcases,
     is_download_test,
     report_warning,
     try_rm,
@@ -34,7 +32,6 @@ from yt_dlp.utils import (
     ExtractorError,
     UnavailableVideoError,
     format_bytes,
-    join_nonempty,
 )
 
 RETRIES = 3
@@ -60,9 +57,7 @@ def _file_md5(fn):
         return hashlib.md5(f.read()).hexdigest()
 
 
-normal_test_cases = gettestcases()
-webpage_test_cases = getwebpagetestcases()
-tests_counter = collections.defaultdict(collections.Counter)
+defs = gettestcases()
 
 
 @is_download_test
@@ -77,13 +72,24 @@ class TestDownload(unittest.TestCase):
 
     def __str__(self):
         """Identify each test with the `add_ie` attribute, if available."""
-        cls, add_ie = type(self), getattr(self, self._testMethodName).add_ie
-        return f'{self._testMethodName} ({cls.__module__}.{cls.__name__}){f" [{add_ie}]" if add_ie else ""}:'
+
+        def strclass(cls):
+            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
+            return f'{cls.__module__}.{cls.__name__}'
+
+        add_ie = getattr(self, self._testMethodName).add_ie
+        return '%s (%s)%s:' % (self._testMethodName,
+                               strclass(self.__class__),
+                               ' [%s]' % add_ie if add_ie else '')
+
+    def setUp(self):
+        self.defs = defs
 
 
 # Dynamically generate tests
 def generator(test_case, tname):
 
     def test_template(self):
         if self.COMPLETED_TESTS.get(tname):
             return
@@ -249,29 +255,25 @@ def generator(test_case, tname):
 
 
 # And add them to TestDownload
-def inject_tests(test_cases, label=''):
-    for test_case in test_cases:
-        name = test_case['name']
-        tname = join_nonempty('test', name, label, tests_counter[name][label], delim='_')
-        tests_counter[name][label] += 1
-
-        test_method = generator(test_case, tname)
-        test_method.__name__ = tname
-        test_method.add_ie = ','.join(test_case.get('add_ie', []))
-        setattr(TestDownload, test_method.__name__, test_method)
-
-
-inject_tests(normal_test_cases)
-
-# TODO: disable redirection to the IE to ensure we are actually testing the webpage extraction
-inject_tests(webpage_test_cases, 'webpage')
+tests_counter = {}
+for test_case in defs:
+    name = test_case['name']
+    i = tests_counter.get(name, 0)
+    tests_counter[name] = i + 1
+    tname = f'test_{name}_{i}' if i else f'test_{name}'
+    test_method = generator(test_case, tname)
+    test_method.__name__ = str(tname)
+    ie_list = test_case.get('add_ie')
+    test_method.add_ie = ie_list and ','.join(ie_list)
+    setattr(TestDownload, test_method.__name__, test_method)
+    del test_method
 
 
-def batch_generator(name):
+def batch_generator(name, num_tests):
     def test_template(self):
-        for label, num_tests in tests_counter[name].items():
-            for i in range(num_tests):
-                test_name = join_nonempty('test', name, label, i, delim='_')
-                try:
-                    getattr(self, test_name)()
-                except unittest.SkipTest:
+        for i in range(num_tests):
+            test_name = f'test_{name}_{i}' if i else f'test_{name}'
+            try:
+                getattr(self, test_name)()
+            except unittest.SkipTest:
@@ -280,8 +282,8 @@ def batch_generator(name):
     return test_template
 
 
-for name in tests_counter:
-    test_method = batch_generator(name)
+for name, num_tests in tests_counter.items():
+    test_method = batch_generator(name, num_tests)
     test_method.__name__ = f'test_{name}_all'
     test_method.add_ie = ''
     setattr(TestDownload, test_method.__name__, test_method)

|
|||||||
try_rm(encodeFilename(filename))
|
try_rm(encodeFilename(filename))
|
||||||
self.assertTrue(downloader.real_download(filename, {
|
self.assertTrue(downloader.real_download(filename, {
|
||||||
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
|
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
|
||||||
}), ep)
|
}))
|
||||||
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
|
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE)
|
||||||
try_rm(encodeFilename(filename))
|
try_rm(encodeFilename(filename))
|
||||||
|
|
||||||
def download_all(self, params):
|
def download_all(self, params):
|
||||||
|
@ -85,7 +85,7 @@ class TestHTTPS(unittest.TestCase):
|
|||||||
|
|
||||||
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True})
|
||||||
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
||||||
self.assertEqual(r['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
|
|
||||||
class TestClientCert(unittest.TestCase):
|
class TestClientCert(unittest.TestCase):
|
||||||
@ -113,7 +113,7 @@ class TestClientCert(unittest.TestCase):
|
|||||||
**params,
|
**params,
|
||||||
})
|
})
|
||||||
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port)
|
||||||
self.assertEqual(r['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port)
|
||||||
|
|
||||||
def test_certificate_combined_nopass(self):
|
def test_certificate_combined_nopass(self):
|
||||||
self._run_test(client_certificate=os.path.join(self.certdir, 'clientwithkey.crt'))
|
self._run_test(client_certificate=os.path.join(self.certdir, 'clientwithkey.crt'))
|
||||||
|
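
The assertions flip between r['url'] and r['entries'][0]['url'] because the extraction result changed shape across these commits: a bare video info dict on one side, a playlist wrapping it on the other. A caller that tolerates either shape (a sketch, not yt-dlp API guidance):

    def first_video_url(result):
        if result.get('entries'):  # playlist-shaped result
            return result['entries'][0]['url']
        return result['url']       # single-video result

    print(first_video_url({'url': 'https://127.0.0.1/vid.mp4'}))
    print(first_video_url({'entries': [{'url': 'https://127.0.0.1/vid.mp4'}]}))
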
test/test_utils.py
@@ -53,7 +53,6 @@ from yt_dlp.utils import (
     fix_xml_ampersands,
     float_or_none,
     format_bytes,
-    get_compatible_ext,
     get_element_by_attribute,
     get_element_by_class,
     get_element_html_by_attribute,
@@ -140,13 +139,13 @@ class TestUtil(unittest.TestCase):
 
         self.assertEqual(sanitize_filename('123'), '123')
 
-        self.assertEqual('abc⧸de', sanitize_filename('abc/de'))
+        self.assertEqual('abc_de', sanitize_filename('abc/de'))
         self.assertFalse('/' in sanitize_filename('abc/de///'))
 
-        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', is_id=False))
-        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', is_id=False))
-        self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False))
-        self.assertEqual('this - that', sanitize_filename('this: that', is_id=False))
+        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
+        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
+        self.assertEqual('yes no', sanitize_filename('yes? no'))
+        self.assertEqual('this - that', sanitize_filename('this: that'))
 
         self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
         aumlaut = 'ä'
@@ -369,7 +368,6 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
         self.assertEqual(unified_strdate('1968 12 10'), '19681210')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
-        self.assertEqual(unified_strdate('31-07-2022 20:00'), '20220731')
         self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
         self.assertEqual(
             unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
@@ -1844,31 +1842,6 @@ Line 1
         self.assertEqual(determine_file_encoding('# coding: utf-32-be'.encode('utf-32-be')), ('utf-32-be', 0))
         self.assertEqual(determine_file_encoding('# coding: utf-16-le'.encode('utf-16-le')), ('utf-16-le', 0))
 
-    def test_get_compatible_ext(self):
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None, None], vexts=['mp4'], aexts=['m4a', 'm4a']), 'mkv')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None], vexts=['flv'], aexts=['flv']), 'flv')
-
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['m4a']), 'mp4')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['webm']), 'mkv')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['m4a']), 'mkv')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['webm']), 'webm')
-
-        self.assertEqual(get_compatible_ext(
-            vcodecs=['h264'], acodecs=['mp4a'], vexts=['mov'], aexts=['m4a']), 'mp4')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=['av01.0.12M.08'], acodecs=['opus'], vexts=['mp4'], aexts=['webm']), 'webm')
-
-        self.assertEqual(get_compatible_ext(
-            vcodecs=['vp9'], acodecs=['opus'], vexts=['webm'], aexts=['webm'], preferences=['flv', 'mp4']), 'mp4')
-        self.assertEqual(get_compatible_ext(
-            vcodecs=['av1'], acodecs=['mp4a'], vexts=['webm'], aexts=['m4a'], preferences=('webm', 'mkv')), 'mkv')
-
 
 if __name__ == '__main__':
     unittest.main()

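
The removed test block doubles as documentation for get_compatible_ext(), which exists only on the f1e2d4a9a2 side. Its behaviour, condensed from the assertions above into a runnable snippet against that checkout:

    from yt_dlp.utils import get_compatible_ext

    # natively compatible pairs keep a native container
    assert get_compatible_ext(vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['m4a']) == 'mp4'
    # mismatched containers fall back to mkv
    assert get_compatible_ext(vcodecs=[None], acodecs=[None], vexts=['mp4'], aexts=['webm']) == 'mkv'
    # codec knowledge can override the extension mismatch
    assert get_compatible_ext(vcodecs=['h264'], acodecs=['mp4a'], vexts=['mov'], aexts=['m4a']) == 'mp4'
    # the first compatible entry of an explicit preference list wins
    assert get_compatible_ext(vcodecs=['vp9'], acodecs=['opus'], vexts=['webm'], aexts=['webm'],
                              preferences=['flv', 'mp4']) == 'mp4'
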
@ -52,7 +52,6 @@ from .utils import (
|
|||||||
DEFAULT_OUTTMPL,
|
DEFAULT_OUTTMPL,
|
||||||
IDENTITY,
|
IDENTITY,
|
||||||
LINK_TEMPLATES,
|
LINK_TEMPLATES,
|
||||||
MEDIA_EXTENSIONS,
|
|
||||||
NO_DEFAULT,
|
NO_DEFAULT,
|
||||||
NUMBER_RE,
|
NUMBER_RE,
|
||||||
OUTTMPL_TYPES,
|
OUTTMPL_TYPES,
|
||||||
@ -81,7 +80,6 @@ from .utils import (
|
|||||||
RejectedVideoReached,
|
RejectedVideoReached,
|
||||||
SameFileError,
|
SameFileError,
|
||||||
UnavailableVideoError,
|
UnavailableVideoError,
|
||||||
UserNotLive,
|
|
||||||
YoutubeDLCookieProcessor,
|
YoutubeDLCookieProcessor,
|
||||||
YoutubeDLHandler,
|
YoutubeDLHandler,
|
||||||
YoutubeDLRedirectHandler,
|
YoutubeDLRedirectHandler,
|
||||||
@ -102,13 +100,11 @@ from .utils import (
|
|||||||
format_decimal_suffix,
|
format_decimal_suffix,
|
||||||
format_field,
|
format_field,
|
||||||
formatSeconds,
|
formatSeconds,
|
||||||
get_compatible_ext,
|
|
||||||
get_domain,
|
get_domain,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
iri_to_uri,
|
iri_to_uri,
|
||||||
join_nonempty,
|
join_nonempty,
|
||||||
locked_file,
|
locked_file,
|
||||||
make_archive_id,
|
|
||||||
make_dir,
|
make_dir,
|
||||||
make_HTTPS_handler,
|
make_HTTPS_handler,
|
||||||
merge_headers,
|
merge_headers,
|
||||||
@ -135,7 +131,6 @@ from .utils import (
|
|||||||
timetuple_from_msec,
|
timetuple_from_msec,
|
||||||
to_high_limit_path,
|
to_high_limit_path,
|
||||||
traverse_obj,
|
traverse_obj,
|
||||||
try_call,
|
|
||||||
try_get,
|
try_get,
|
||||||
url_basename,
|
url_basename,
|
||||||
variadic,
|
variadic,
|
||||||
@ -144,7 +139,7 @@ from .utils import (
|
|||||||
write_json_file,
|
write_json_file,
|
||||||
write_string,
|
write_string,
|
||||||
)
|
)
|
||||||
from .version import RELEASE_GIT_HEAD, VARIANT, __version__
|
from .version import RELEASE_GIT_HEAD, __version__
|
||||||
|
|
||||||
if compat_os_name == 'nt':
|
if compat_os_name == 'nt':
|
||||||
import ctypes
|
import ctypes
|
||||||
@ -374,7 +369,7 @@ class YoutubeDL:
|
|||||||
|
|
||||||
Progress hooks are guaranteed to be called at least twice
|
Progress hooks are guaranteed to be called at least twice
|
||||||
(with status "started" and "finished") if the processing is successful.
|
(with status "started" and "finished") if the processing is successful.
|
||||||
merge_output_format: "/" separated list of extensions to use when merging formats.
|
merge_output_format: Extension to use when merging formats.
|
||||||
final_ext: Expected final extension; used to detect when the file was
|
final_ext: Expected final extension; used to detect when the file was
|
||||||
already downloaded and converted
|
already downloaded and converted
|
||||||
fixup: Automatically correct known faults of the file.
|
fixup: Automatically correct known faults of the file.
|
||||||
@ -527,8 +522,7 @@ class YoutubeDL:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
_NUMERIC_FIELDS = {
|
_NUMERIC_FIELDS = {
|
||||||
'width', 'height', 'asr', 'audio_channels', 'fps',
|
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
|
||||||
'tbr', 'abr', 'vbr', 'filesize', 'filesize_approx',
|
|
||||||
'timestamp', 'release_timestamp',
|
'timestamp', 'release_timestamp',
|
||||||
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
|
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
|
||||||
'average_rating', 'comment_count', 'age_limit',
|
'average_rating', 'comment_count', 'age_limit',
|
||||||
@ -540,7 +534,7 @@ class YoutubeDL:
|
|||||||
_format_fields = {
|
_format_fields = {
|
||||||
# NB: Keep in sync with the docstring of extractor/common.py
|
# NB: Keep in sync with the docstring of extractor/common.py
|
||||||
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
|
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
|
||||||
'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr', 'audio_channels',
|
'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
|
||||||
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
|
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
|
||||||
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
|
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
|
||||||
'preference', 'language', 'language_preference', 'quality', 'source_preference',
|
'preference', 'language', 'language_preference', 'quality', 'source_preference',
|
||||||
@ -548,9 +542,9 @@ class YoutubeDL:
|
|||||||
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
|
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
|
||||||
}
|
}
|
||||||
_format_selection_exts = {
|
_format_selection_exts = {
|
||||||
'audio': set(MEDIA_EXTENSIONS.common_audio),
|
'audio': {'m4a', 'mp3', 'ogg', 'aac'},
|
||||||
'video': set(MEDIA_EXTENSIONS.common_video + ('3gp', )),
|
'video': {'mp4', 'flv', 'webm', '3gp'},
|
||||||
'storyboards': set(MEDIA_EXTENSIONS.storyboards),
|
'storyboards': {'mhtml'},
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, params=None, auto_init=True):
|
def __init__(self, params=None, auto_init=True):
|
||||||
@ -590,8 +584,7 @@ class YoutubeDL:
|
|||||||
for type_, stream in self._out_files.items_ if type_ != 'console'
|
for type_, stream in self._out_files.items_ if type_ != 'console'
|
||||||
})
|
})
|
||||||
|
|
||||||
# The code is left like this to be reused for future deprecations
|
MIN_SUPPORTED, MIN_RECOMMENDED = (3, 6), (3, 7)
|
||||||
MIN_SUPPORTED, MIN_RECOMMENDED = (3, 7), (3, 7)
|
|
||||||
current_version = sys.version_info[:2]
|
current_version = sys.version_info[:2]
|
||||||
if current_version < MIN_RECOMMENDED:
|
if current_version < MIN_RECOMMENDED:
|
||||||
msg = ('Support for Python version %d.%d has been deprecated. '
|
msg = ('Support for Python version %d.%d has been deprecated. '
|
||||||
@ -1166,9 +1159,6 @@ class YoutubeDL:
|
|||||||
if mdict['strf_format']:
|
if mdict['strf_format']:
|
||||||
value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
|
value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
|
||||||
|
|
||||||
# XXX: Workaround for https://github.com/yt-dlp/yt-dlp/issues/4485
|
|
||||||
if sanitize and value == '':
|
|
||||||
value = None
|
|
||||||
return value
|
return value
|
||||||
|
|
||||||
na = self.params.get('outtmpl_na_placeholder', 'NA')
|
na = self.params.get('outtmpl_na_placeholder', 'NA')
|
||||||
@ -1318,7 +1308,7 @@ class YoutubeDL:
|
|||||||
def _match_entry(self, info_dict, incomplete=False, silent=False):
|
def _match_entry(self, info_dict, incomplete=False, silent=False):
|
||||||
""" Returns None if the file should be downloaded """
|
""" Returns None if the file should be downloaded """
|
||||||
|
|
||||||
video_title = info_dict.get('title', info_dict.get('id', 'entry'))
|
video_title = info_dict.get('title', info_dict.get('id', 'video'))
|
||||||
|
|
||||||
def check_filter():
|
def check_filter():
|
||||||
if 'title' in info_dict:
|
if 'title' in info_dict:
|
||||||
@ -1465,7 +1455,7 @@ class YoutubeDL:
|
|||||||
break
|
break
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
||||||
def _wait_for_video(self, ie_result={}):
|
def _wait_for_video(self, ie_result):
|
||||||
if (not self.params.get('wait_for_video')
|
if (not self.params.get('wait_for_video')
|
||||||
or ie_result.get('_type', 'video') != 'video'
|
or ie_result.get('_type', 'video') != 'video'
|
||||||
or ie_result.get('formats') or ie_result.get('url')):
|
or ie_result.get('formats') or ie_result.get('url')):
|
||||||
@ -1489,7 +1479,7 @@ class YoutubeDL:
|
|||||||
if diff is None and ie_result.get('live_status') == 'is_upcoming':
|
if diff is None and ie_result.get('live_status') == 'is_upcoming':
|
||||||
diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
|
diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
|
||||||
self.report_warning('Release time of video is not known')
|
self.report_warning('Release time of video is not known')
|
||||||
elif ie_result and (diff or 0) <= 0:
|
elif (diff or 0) <= 0:
|
||||||
self.report_warning('Video should already be available according to extracted info')
|
self.report_warning('Video should already be available according to extracted info')
|
||||||
diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
|
diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
|
||||||
self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
|
self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
|
||||||
@ -1513,14 +1503,7 @@ class YoutubeDL:
|
|||||||
|
|
||||||
@_handle_extraction_exceptions
|
@_handle_extraction_exceptions
|
||||||
def __extract_info(self, url, ie, download, extra_info, process):
|
def __extract_info(self, url, ie, download, extra_info, process):
|
||||||
try:
|
|
||||||
ie_result = ie.extract(url)
|
ie_result = ie.extract(url)
|
||||||
except UserNotLive as e:
|
|
||||||
if process:
|
|
||||||
if self.params.get('wait_for_video'):
|
|
||||||
self.report_warning(e)
|
|
||||||
self._wait_for_video()
|
|
||||||
raise
|
|
||||||
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
|
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
|
||||||
self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
|
self.report_warning(f'Extractor {ie.IE_NAME} returned nothing{bug_reports_message()}')
|
||||||
return
|
return
|
||||||
@ -1570,8 +1553,7 @@ class YoutubeDL:
|
|||||||
result_type = ie_result.get('_type', 'video')
|
result_type = ie_result.get('_type', 'video')
|
||||||
|
|
||||||
if result_type in ('url', 'url_transparent'):
|
if result_type in ('url', 'url_transparent'):
|
||||||
ie_result['url'] = sanitize_url(
|
ie_result['url'] = sanitize_url(ie_result['url'])
|
||||||
ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
|
|
||||||
if ie_result.get('original_url'):
|
if ie_result.get('original_url'):
|
||||||
extra_info.setdefault('original_url', ie_result['original_url'])
|
extra_info.setdefault('original_url', ie_result['original_url'])
|
||||||
|
|
||||||
@ -1694,37 +1676,23 @@ class YoutubeDL:
|
|||||||
return make_dir(path, self.report_error)
|
return make_dir(path, self.report_error)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _playlist_infodict(ie_result, strict=False, **kwargs):
|
def _playlist_infodict(ie_result, **kwargs):
|
||||||
info = {
|
return {
|
||||||
'playlist_count': ie_result.get('playlist_count'),
|
**ie_result,
|
||||||
'playlist': ie_result.get('title') or ie_result.get('id'),
|
'playlist': ie_result.get('title') or ie_result.get('id'),
|
||||||
'playlist_id': ie_result.get('id'),
|
'playlist_id': ie_result.get('id'),
|
||||||
'playlist_title': ie_result.get('title'),
|
'playlist_title': ie_result.get('title'),
|
||||||
'playlist_uploader': ie_result.get('uploader'),
|
'playlist_uploader': ie_result.get('uploader'),
|
||||||
'playlist_uploader_id': ie_result.get('uploader_id'),
|
'playlist_uploader_id': ie_result.get('uploader_id'),
|
||||||
**kwargs,
|
|
||||||
}
|
|
||||||
if strict:
|
|
||||||
return info
|
|
||||||
return {
|
|
||||||
**info,
|
|
||||||
'playlist_index': 0,
|
'playlist_index': 0,
|
||||||
'__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
|
**kwargs,
|
||||||
'extractor': ie_result['extractor'],
|
|
||||||
'webpage_url': ie_result['webpage_url'],
|
|
||||||
'webpage_url_basename': url_basename(ie_result['webpage_url']),
|
|
||||||
'webpage_url_domain': get_domain(ie_result['webpage_url']),
|
|
||||||
'extractor_key': ie_result['extractor_key'],
|
|
||||||
}
|
}
|
||||||
|
|
||||||
def __process_playlist(self, ie_result, download):
|
def __process_playlist(self, ie_result, download):
|
||||||
"""Process each entry in the playlist"""
|
"""Process each entry in the playlist"""
|
||||||
assert ie_result['_type'] in ('playlist', 'multi_video')
|
assert ie_result['_type'] in ('playlist', 'multi_video')
|
||||||
|
|
||||||
common_info = self._playlist_infodict(ie_result, strict=True)
|
title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
|
||||||
title = common_info.get('playlist') or '<Untitled>'
|
|
||||||
if self._match_entry(common_info, incomplete=True) is not None:
|
|
||||||
return
|
|
||||||
self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
|
self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
|
||||||
|
|
||||||
all_entries = PlaylistEntries(self, ie_result)
|
all_entries = PlaylistEntries(self, ie_result)
|
||||||
@ -1742,14 +1710,12 @@ class YoutubeDL:
|
|||||||
# Better to do this after potentially exhausting entries
|
# Better to do this after potentially exhausting entries
|
||||||
ie_result['playlist_count'] = all_entries.get_full_count()
|
ie_result['playlist_count'] = all_entries.get_full_count()
|
||||||
|
|
||||||
extra = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
|
|
||||||
ie_copy = collections.ChainMap(ie_result, extra)
|
|
||||||
|
|
||||||
_infojson_written = False
|
_infojson_written = False
|
||||||
write_playlist_files = self.params.get('allow_playlist_files', True)
|
write_playlist_files = self.params.get('allow_playlist_files', True)
|
||||||
if write_playlist_files and self.params.get('list_thumbnails'):
|
if write_playlist_files and self.params.get('list_thumbnails'):
|
||||||
self.list_thumbnails(ie_result)
|
self.list_thumbnails(ie_result)
|
||||||
if write_playlist_files and not self.params.get('simulate'):
|
if write_playlist_files and not self.params.get('simulate'):
|
||||||
|
ie_copy = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
|
||||||
_infojson_written = self._write_info_json(
|
_infojson_written = self._write_info_json(
|
||||||
'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
|
'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
|
||||||
if _infojson_written is None:
|
if _infojson_written is None:
|
||||||
@ -1758,7 +1724,7 @@ class YoutubeDL:
|
|||||||
self.prepare_filename(ie_copy, 'pl_description')) is None:
|
self.prepare_filename(ie_copy, 'pl_description')) is None:
|
||||||
return
|
return
|
||||||
# TODO: This should be passed to ThumbnailsConvertor if necessary
|
# TODO: This should be passed to ThumbnailsConvertor if necessary
|
||||||
self._write_thumbnails('playlist', ie_result, self.prepare_filename(ie_copy, 'pl_thumbnail'))
|
self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
|
||||||
|
|
||||||
if lazy:
|
if lazy:
|
||||||
if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
|
if self.params.get('playlistreverse') or self.params.get('playlistrandom'):
|
||||||
@@ -1782,33 +1748,35 @@ class YoutubeDL:
         for i, (playlist_index, entry) in enumerate(entries):
             if lazy:
                 resolved_entries.append((playlist_index, entry))
-            if not entry:
-                continue

-            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
-            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
-                playlist_index = ie_result['requested_entries'][i]
-
-            entry_copy = collections.ChainMap(entry, {
-                **common_info,
-                'n_entries': int_or_none(n_entries),
-                'playlist_index': playlist_index,
-                'playlist_autonumber': i + 1,
-            })
-
-            if self._match_entry(entry_copy, incomplete=True) is not None:
-                # For compatabilty with youtube-dl. See https://github.com/yt-dlp/yt-dlp/issues/4369
-                resolved_entries[i] = (playlist_index, NO_DEFAULT)
+            # TODO: Add auto-generated fields
+            if not entry or self._match_entry(entry, incomplete=True) is not None:
                 continue

             self.to_screen('[download] Downloading video %s of %s' % (
                 self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))

-            extra.update({
+            entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
+            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
+                playlist_index = ie_result['requested_entries'][i]
+
+            entry_result = self.__process_iterable_entry(entry, download, {
+                'n_entries': int_or_none(n_entries),
+                '__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
+                'playlist_count': ie_result.get('playlist_count'),
                 'playlist_index': playlist_index,
                 'playlist_autonumber': i + 1,
+                'playlist': title,
+                'playlist_id': ie_result.get('id'),
+                'playlist_title': ie_result.get('title'),
+                'playlist_uploader': ie_result.get('uploader'),
+                'playlist_uploader_id': ie_result.get('uploader_id'),
+                'extractor': ie_result['extractor'],
+                'webpage_url': ie_result['webpage_url'],
+                'webpage_url_basename': url_basename(ie_result['webpage_url']),
+                'webpage_url_domain': get_domain(ie_result['webpage_url']),
+                'extractor_key': ie_result['extractor_key'],
             })
-            entry_result = self.__process_iterable_entry(entry, download, extra)
             if not entry_result:
                 failures += 1
                 if failures >= max_failures:
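A standalone sketch of the `collections.ChainMap` overlay the removed side uses to build `entry_copy`: playlist fields are layered over the entry for matching without copying or mutating it (the field values here are illustrative only):

    import collections

    entry = {'id': 'abc', 'title': 'Some video'}
    overlay = {'playlist_index': 3, 'playlist_autonumber': 1}

    entry_copy = collections.ChainMap(entry, overlay)
    assert entry_copy['title'] == 'Some video'  # falls through to the entry
    assert entry_copy['playlist_index'] == 3    # served by the overlay
    assert 'playlist_index' not in entry        # original dict is untouched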
@@ -1819,8 +1787,7 @@ class YoutubeDL:
             resolved_entries[i] = (playlist_index, entry_result)

         # Update with processed data
-        ie_result['requested_entries'] = [i for i, e in resolved_entries if e is not NO_DEFAULT]
-        ie_result['entries'] = [e for _, e in resolved_entries if e is not NO_DEFAULT]
+        ie_result['requested_entries'], ie_result['entries'] = tuple(zip(*resolved_entries)) or ([], [])

         # Write the updated info to json
         if _infojson_written is True and self._write_info_json(
@@ -2094,13 +2061,14 @@ class YoutubeDL:
             the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
             the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None

-            output_ext = get_compatible_ext(
-                vcodecs=[f.get('vcodec') for f in video_fmts],
-                acodecs=[f.get('acodec') for f in audio_fmts],
-                vexts=[f['ext'] for f in video_fmts],
-                aexts=[f['ext'] for f in audio_fmts],
-                preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
-                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
+            output_ext = self.params.get('merge_output_format')
+            if not output_ext:
+                if the_only_video:
+                    output_ext = the_only_video['ext']
+                elif the_only_audio and not video_fmts:
+                    output_ext = the_only_audio['ext']
+                else:
+                    output_ext = 'mkv'

             filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

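The restored branch amounts to a small fallback chain for the merge container; the same logic as a pure function, with illustrative names:

    def pick_merge_ext(merge_output_format, the_only_video, the_only_audio, video_fmts):
        if merge_output_format:  # explicit --merge-output-format always wins
            return merge_output_format
        if the_only_video:  # a single video format keeps its own container
            return the_only_video['ext']
        if the_only_audio and not video_fmts:  # audio-only merge keeps the audio container
            return the_only_audio['ext']
        return 'mkv'  # mkv can hold nearly any codec combination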
@@ -2133,7 +2101,6 @@ class YoutubeDL:
                     'acodec': the_only_audio.get('acodec'),
                     'abr': the_only_audio.get('abr'),
                     'asr': the_only_audio.get('asr'),
-                    'audio_channels': the_only_audio.get('audio_channels')
                 })

         return new_dict
@@ -2494,7 +2461,7 @@ class YoutubeDL:
         info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
         if not self.params.get('allow_unplayable_formats'):
             formats = [f for f in formats if not f.get('has_drm')]
-            if info_dict['_has_drm'] and formats and all(
+            if info_dict['_has_drm'] and all(
                     f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
                 self.report_warning(
                     'This video is DRM protected and only images are available for download. '
@@ -3073,9 +3040,33 @@ class YoutubeDL:
                 return

         if info_dict.get('requested_formats') is not None:
+
+            def compatible_formats(formats):
+                # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
+                video_formats = [format for format in formats if format.get('vcodec') != 'none']
+                audio_formats = [format for format in formats if format.get('acodec') != 'none']
+                if len(video_formats) > 2 or len(audio_formats) > 2:
+                    return False
+
+                # Check extension
+                exts = {format.get('ext') for format in formats}
+                COMPATIBLE_EXTS = (
+                    {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
+                    {'webm'},
+                )
+                for ext_sets in COMPATIBLE_EXTS:
+                    if ext_sets.issuperset(exts):
+                        return True
+                # TODO: Check acodec/vcodec
+                return False
+
             requested_formats = info_dict['requested_formats']
             old_ext = info_dict['ext']
             if self.params.get('merge_output_format') is None:
+                if not compatible_formats(requested_formats):
+                    info_dict['ext'] = 'mkv'
+                    self.report_warning(
+                        'Requested formats are incompatible for merge and will be merged into mkv')
                 if (info_dict['ext'] == 'webm'
                         and info_dict.get('thumbnails')
                         # check with type instead of pp_key, __name__, or isinstance
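A quick check of the extension test reintroduced above, using hypothetical format dicts; both extensions fall inside the mp4 family, so no mkv remux is forced:

    video = {'vcodec': 'h264', 'acodec': 'none', 'ext': 'mp4'}
    audio = {'vcodec': 'none', 'acodec': 'aac', 'ext': 'm4a'}
    mp4_family = {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'}
    assert {f['ext'] for f in (video, audio)} <= mp4_family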
@@ -3435,16 +3426,18 @@ class YoutubeDL:
                     break
             else:
                 return
-        return make_archive_id(extractor, video_id)
+        return f'{extractor.lower()} {video_id}'

     def in_download_archive(self, info_dict):
         fn = self.params.get('download_archive')
         if fn is None:
             return False

-        vid_ids = [self._make_archive_id(info_dict)]
-        vid_ids.extend(info_dict.get('_old_archive_ids', []))
-        return any(id_ in self.archive for id_ in vid_ids)
+        vid_id = self._make_archive_id(info_dict)
+        if not vid_id:
+            return False  # Incomplete video information
+
+        return vid_id in self.archive

     def record_download_archive(self, info_dict):
         fn = self.params.get('download_archive')
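Both sides key the download archive on `'<extractor> <video_id>'` strings; a sketch of the lookup with hypothetical values:

    archive = {'youtube dQw4w9WgXcQ'}  # one line per previously downloaded video

    def make_archive_id(extractor, video_id):
        return f'{extractor.lower()} {video_id}'

    assert make_archive_id('Youtube', 'dQw4w9WgXcQ') in archive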
@@ -3574,7 +3567,6 @@ class YoutubeDL:
                 format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
                 format_field(f, 'fps', '\t%d', func=round),
                 format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
-                format_field(f, 'audio_channels', '\t%s'),
                 delim,
                 format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
                 format_field(f, 'tbr', '\t%dk', func=round),
@@ -3594,7 +3586,7 @@ class YoutubeDL:
                 delim=' '),
         ] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
         header_line = self._list_format_headers(
-            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', 'CH', delim, '\tFILESIZE', '\tTBR', 'PROTO',
+            'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
             delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')

         return render_table(
@@ -3679,8 +3671,6 @@ class YoutubeDL:
         write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

         source = detect_variant()
-        if VARIANT not in (None, 'pip'):
-            source += '*'
         write_debug(join_nonempty(
             'yt-dlp version', __version__,
             f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
@@ -1,8 +1,4 @@
-try:
-    import contextvars  # noqa: F401
-except Exception:
-    raise Exception(
-        f'You are using an unsupported version of Python. Only Python versions 3.7 and above are supported by yt-dlp')  # noqa: F541
+f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp'  # noqa: F541

 __license__ = 'Public Domain'

@@ -24,8 +20,6 @@ from .extractor.common import InfoExtractor
 from .options import parseOpts
 from .postprocessor import (
     FFmpegExtractAudioPP,
-    FFmpegMergerPP,
-    FFmpegPostProcessor,
     FFmpegSubtitlesConvertorPP,
     FFmpegThumbnailsConvertorPP,
     FFmpegVideoConvertorPP,
@@ -228,8 +222,6 @@ def validate_options(opts):
             validate_regex('format sorting', f, InfoExtractor.FormatSort.regex)

     # Postprocessor formats
-    validate_regex('merge output format', opts.merge_output_format,
-                   r'({0})(/({0}))*'.format('|'.join(map(re.escape, FFmpegMergerPP.SUPPORTED_EXTS))))
     validate_regex('audio format', opts.audioformat, FFmpegExtractAudioPP.FORMAT_RE)
     validate_in('subtitle format', opts.convertsubtitles, FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS)
     validate_regex('thumbnail format', opts.convertthumbnails, FFmpegThumbnailsConvertorPP.FORMAT_RE)
@@ -907,11 +899,6 @@ def _real_main(argv=None):
     if print_extractor_information(opts, all_urls):
         return

-    # We may need ffmpeg_location without having access to the YoutubeDL instance
-    # See https://github.com/yt-dlp/yt-dlp/issues/2191
-    if opts.ffmpeg_location:
-        FFmpegPostProcessor._ffmpeg_location.set(opts.ffmpeg_location)
-
     with YoutubeDL(ydl_opts) as ydl:
         pre_process = opts.update_self or opts.rm_cachedir
         actual_use = all_urls or opts.load_info_filename
@@ -3,12 +3,13 @@ import sys
 import warnings
 import xml.etree.ElementTree as etree

+from . import re
 from ._deprecated import *  # noqa: F401, F403
 from .compat_utils import passthrough_module

 # XXX: Implement this the same way as other DeprecationWarnings without circular import
 passthrough_module(__name__, '._legacy', callback=lambda attr: warnings.warn(
-    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=3))
+    DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=2))


 # HTMLParseError has been deprecated in Python 3.3 and removed in
@@ -32,7 +33,6 @@ compat_os_name = os._name if os.name == 'java' else os.name

 if compat_os_name == 'nt':
     def compat_shlex_quote(s):
-        import re
         return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
 else:
     from shlex import quote as compat_shlex_quote  # noqa: F401
@@ -22,14 +22,10 @@ import urllib.request
 import xml.etree.ElementTree as etree
 from subprocess import DEVNULL

-# isort: split
-import asyncio  # noqa: F401
-import re  # noqa: F401
-from asyncio import run as compat_asyncio_run  # noqa: F401
-from re import Pattern as compat_Pattern  # noqa: F401
-from re import match as compat_Match  # noqa: F401
+from .compat_utils import passthrough_module  # isort: split
+from .asyncio import run as compat_asyncio_run  # noqa: F401
+from .re import Pattern as compat_Pattern  # noqa: F401
+from .re import match as compat_Match  # noqa: F401

-from .compat_utils import passthrough_module
 from ..dependencies import Cryptodome_AES as compat_pycrypto_AES  # noqa: F401
 from ..dependencies import brotli as compat_brotli  # noqa: F401
 from ..dependencies import websockets as compat_websockets  # noqa: F401
yt_dlp/compat/asyncio.py (new file, 23 lines)
@@ -0,0 +1,23 @@
+# flake8: noqa: F405
+from asyncio import *  # noqa: F403
+
+from .compat_utils import passthrough_module
+
+passthrough_module(__name__, 'asyncio')
+del passthrough_module
+
+try:
+    run  # >= 3.7
+except NameError:
+    def run(coro):
+        try:
+            loop = get_event_loop()
+        except RuntimeError:
+            loop = new_event_loop()
+            set_event_loop(loop)
+        loop.run_until_complete(coro)
+
+try:
+    all_tasks  # >= 3.7
+except NameError:
+    all_tasks = Task.all_tasks
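The fallback `run()` mirrors `asyncio.run()`, which only exists on Python 3.7+; a usage sketch of the shim module:

    from yt_dlp.compat import asyncio

    async def main():
        await asyncio.sleep(0)

    # stdlib asyncio.run on 3.7+; on 3.6 the fallback drives the event loop itself
    asyncio.run(main())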
@@ -2,15 +2,13 @@ tests = {
     'webp': lambda h: h[0:4] == b'RIFF' and h[8:] == b'WEBP',
     'png': lambda h: h[:8] == b'\211PNG\r\n\032\n',
     'jpeg': lambda h: h[6:10] in (b'JFIF', b'Exif'),
-    'gif': lambda h: h[:6] in (b'GIF87a', b'GIF89a'),
 }


-def what(file=None, h=None):
-    """Detect format of image (Currently supports jpeg, png, webp, gif only)
+def what(path):
+    """Detect format of image (Currently supports jpeg, png, webp only)
     Ref: https://github.com/python/cpython/blob/3.10/Lib/imghdr.py
     """
-    if h is None:
-        with open(file, 'rb') as f:
-            h = f.read(12)
-    return next((type_ for type_, test in tests.items() if test(h)), None)
+    with open(path, 'rb') as f:
+        head = f.read(12)
+    return next((type_ for type_, test in tests.items() if test(head)), None)
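Usage difference between the two signatures, with a hypothetical thumbnail file:

    # removed signature: accepts a path or already-read header bytes
    with open('thumbnail.webp', 'rb') as f:  # hypothetical file
        fmt = what(h=f.read(12))

    # restored (older) signature: path only
    fmt = what('thumbnail.webp')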
yt_dlp/compat/re.py (new file, 18 lines)
@@ -0,0 +1,18 @@
+# flake8: noqa: F405
+from re import *  # F403
+
+from .compat_utils import passthrough_module
+
+passthrough_module(__name__, 're')
+del passthrough_module
+
+try:
+    Pattern  # >= 3.7
+except NameError:
+    Pattern = type(compile(''))
+
+
+try:
+    Match  # >= 3.7
+except NameError:
+    Match = type(compile('').match(''))
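`re.Pattern` and `re.Match` are not public names on Python 3.6; the shim recovers them from compiled objects. A usage sketch:

    from yt_dlp.compat import re

    pat = re.compile(r'\d+')
    assert isinstance(pat, re.Pattern)
    assert isinstance(pat.match('42'), re.Match)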
@@ -1,5 +1,6 @@
 import base64
 import contextlib
+import ctypes
 import http.cookiejar
 import json
 import os
@@ -875,12 +876,10 @@ def _decrypt_windows_dpapi(ciphertext, logger):
     References:
         - https://docs.microsoft.com/en-us/windows/win32/api/dpapi/nf-dpapi-cryptunprotectdata
     """
-    import ctypes
-    import ctypes.wintypes
+    from ctypes.wintypes import DWORD

     class DATA_BLOB(ctypes.Structure):
-        _fields_ = [('cbData', ctypes.wintypes.DWORD),
+        _fields_ = [('cbData', DWORD),
                     ('pbData', ctypes.POINTER(ctypes.c_char))]

     buffer = ctypes.create_string_buffer(ciphertext)
@@ -28,7 +28,7 @@ try:
 except ImportError:
     try:
         from Crypto.Cipher import AES as Cryptodome_AES
-    except (ImportError, SyntaxError):  # Old Crypto gives SyntaxError in newer Python
+    except ImportError:
         Cryptodome_AES = None
     else:
         try:
@@ -1,6 +1,5 @@
 import contextlib
 import errno
-import functools
 import os
 import random
 import re
@@ -13,15 +12,14 @@ from ..minicurses import (
     QuietMultilinePrinter,
 )
 from ..utils import (
-    IDENTITY,
-    NO_DEFAULT,
     NUMBER_RE,
     LockingUnsupportedError,
     Namespace,
-    RetryManager,
     classproperty,
     decodeArgument,
     encodeFilename,
+    error_to_compat_str,
+    float_or_none,
     format_bytes,
     join_nonempty,
     sanitize_open,
@@ -217,24 +215,27 @@ class FileDownloader:
         return filename + '.ytdl'

     def wrap_file_access(action, *, fatal=False):
-        def error_callback(err, count, retries, *, fd):
-            return RetryManager.report_retry(
-                err, count, retries, info=fd.__to_screen,
-                warn=lambda e: (time.sleep(0.01), fd.to_screen(f'[download] Unable to {action} file: {e}')),
-                error=None if fatal else lambda e: fd.report_error(f'Unable to {action} file: {e}'),
-                sleep_func=fd.params.get('retry_sleep_functions', {}).get('file_access'))
-
-        def wrapper(self, func, *args, **kwargs):
-            for retry in RetryManager(self.params.get('file_access_retries'), error_callback, fd=self):
-                try:
-                    return func(self, *args, **kwargs)
-                except OSError as err:
-                    if err.errno in (errno.EACCES, errno.EINVAL):
-                        retry.error = err
-                        continue
-                    retry.error_callback(err, 1, 0)
-
-        return functools.partial(functools.partialmethod, wrapper)
+        def outer(func):
+            def inner(self, *args, **kwargs):
+                file_access_retries = self.params.get('file_access_retries', 0)
+                retry = 0
+                while True:
+                    try:
+                        return func(self, *args, **kwargs)
+                    except OSError as err:
+                        retry = retry + 1
+                        if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
+                            if not fatal:
+                                self.report_error(f'unable to {action} file: {err}')
+                                return
+                            raise
+                        self.to_screen(
+                            f'[download] Unable to {action} file due to file access error. '
+                            f'Retrying (attempt {retry} of {self.format_retries(file_access_retries)}) ...')
+                        if not self.sleep_retry('file_access', retry):
+                            time.sleep(0.01)
+            return inner
+        return outer

     @wrap_file_access('open', fatal=True)
     def sanitize_open(self, filename, open_mode):
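Stripped of yt-dlp specifics, the restored wrapper is a count-based retry decorator for transient file-access errors; a self-contained sketch of the same shape (names are illustrative):

    import errno
    import functools
    import time

    def retry_file_access(retries, fatal=False):
        def outer(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                attempt = 0
                while True:
                    try:
                        return func(*args, **kwargs)
                    except OSError as err:
                        attempt += 1
                        # only transient access errors are retried
                        if attempt > retries or err.errno not in (errno.EACCES, errno.EINVAL):
                            if not fatal:
                                return None
                            raise
                        time.sleep(0.01)
            return inner
        return outer

    @retry_file_access(retries=3, fatal=True)
    def open_output(name):
        return open(name, 'ab')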
@@ -334,10 +335,7 @@ class FileDownloader:
         if s['status'] == 'finished':
             if self.params.get('noprogress'):
                 self.to_screen('[download] Download completed')
-            speed = try_call(lambda: s['total_bytes'] / s['elapsed'])
             s.update({
-                'speed': speed,
-                '_speed_str': self.format_speed(speed).strip(),
                 '_total_bytes_str': format_bytes(s.get('total_bytes')),
                 '_elapsed_str': self.format_seconds(s.get('elapsed')),
                 '_percent_str': self.format_percent(100),
@@ -346,7 +344,6 @@ class FileDownloader:
                     '100%%',
                     with_fields(('total_bytes', 'of %(_total_bytes_str)s')),
                     with_fields(('elapsed', 'in %(_elapsed_str)s')),
-                    with_fields(('speed', 'at %(_speed_str)s')),
                     delim=' '))

         if s['status'] != 'downloading':
@@ -381,20 +378,25 @@ class FileDownloader:
         """Report attempt to resume at given byte."""
         self.to_screen('[download] Resuming download at byte %s' % resume_len)

-    def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
-        """Report retry"""
-        is_frag = False if frag_index is NO_DEFAULT else 'fragment'
-        RetryManager.report_retry(
-            err, count, retries, info=self.__to_screen,
-            warn=lambda msg: self.__to_screen(f'[download] Got error: {msg}'),
-            error=IDENTITY if not fatal else lambda e: self.report_error(f'\r[download] Got error: {e}'),
-            sleep_func=self.params.get('retry_sleep_functions', {}).get(is_frag or 'http'),
-            suffix=f'fragment{"s" if frag_index is None else f" {frag_index}"}' if is_frag else None)
+    def report_retry(self, err, count, retries):
+        """Report retry in case of HTTP error 5xx"""
+        self.__to_screen(
+            '[download] Got server HTTP error: %s. Retrying (attempt %d of %s) ...'
+            % (error_to_compat_str(err), count, self.format_retries(retries)))
+        self.sleep_retry('http', count)

     def report_unable_to_resume(self):
         """Report it was impossible to resume download."""
         self.to_screen('[download] Unable to resume')

+    def sleep_retry(self, retry_type, count):
+        sleep_func = self.params.get('retry_sleep_functions', {}).get(retry_type)
+        delay = float_or_none(sleep_func(n=count - 1)) if sleep_func else None
+        if delay:
+            self.__to_screen(f'Sleeping {delay:.2f} seconds ...')
+            time.sleep(delay)
+        return sleep_func is not None
+
     @staticmethod
     def supports_manifest(manifest):
         """ Whether the downloader can download the fragments from the manifest.
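`sleep_retry` looks up a per-type callable from the `retry_sleep_functions` option and calls it as `sleep_func(n=count - 1)`; a hypothetical configuration giving exponential backoff on HTTP retries:

    ydl_opts = {
        'retries': 5,
        # keys follow the retry types used above: 'http', 'fragment', 'file_access'
        'retry_sleep_functions': {'http': lambda n: 2 ** n},  # 1s, 2s, 4s, ...
    }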
@@ -10,7 +10,6 @@ from ..compat import functools
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
-    RetryManager,
     _configuration_args,
     check_executable,
     classproperty,
@@ -135,21 +134,28 @@ class ExternalFD(FragmentFD):
             self.to_stderr(stderr)
             return returncode

+        fragment_retries = self.params.get('fragment_retries', 0)
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

-        retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry,
-                                     frag_index=None, fatal=not skip_unavailable_fragments)
-        for retry in retry_manager:
+        count = 0
+        while count <= fragment_retries:
             _, stderr, returncode = Popen.run(cmd, text=True, stderr=subprocess.PIPE)
             if not returncode:
                 break

             # TODO: Decide whether to retry based on error code
             # https://aria2.github.io/manual/en/html/aria2c.html#exit-status
             if stderr:
                 self.to_stderr(stderr)
-            retry.error = Exception()
-            continue
-        if not skip_unavailable_fragments and retry_manager.error:
+            count += 1
+            if count <= fragment_retries:
+                self.to_screen(
+                    '[%s] Got error. Retrying fragments (attempt %d of %s)...'
+                    % (self.get_basename(), count, self.format_retries(fragment_retries)))
+                self.sleep_retry('fragment', count)
+        if count > fragment_retries:
+            if not skip_unavailable_fragments:
+                self.report_error('Giving up after %s fragment retries' % fragment_retries)
                 return -1

         decrypt_fragment = self.decrypter(info_dict)
@@ -14,8 +14,8 @@ from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
 from ..compat import compat_os_name
 from ..utils import (
     DownloadError,
-    RetryManager,
     encodeFilename,
+    error_to_compat_str,
     sanitized_Request,
     traverse_obj,
 )
@@ -65,9 +65,10 @@ class FragmentFD(FileDownloader):
     """

     def report_retry_fragment(self, err, frag_index, count, retries):
-        self.deprecation_warning(
-            'yt_dlp.downloader.FragmentFD.report_retry_fragment is deprecated. Use yt_dlp.downloader.FileDownloader.report_retry instead')
-        return self.report_retry(err, count, retries, frag_index)
+        self.to_screen(
+            '\r[download] Got server HTTP error: %s. Retrying fragment %d (attempt %d of %s) ...'
+            % (error_to_compat_str(err), frag_index, count, self.format_retries(retries)))
+        self.sleep_retry('fragment', count)

     def report_skip_fragment(self, frag_index, err=None):
         err = f' {err};' if err else ''
@@ -346,8 +347,6 @@ class FragmentFD(FileDownloader):
             return _key_cache[url]

         def decrypt_fragment(fragment, frag_content):
-            if frag_content is None:
-                return
             decrypt_info = fragment.get('decrypt_info')
             if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
                 return frag_content
@@ -433,6 +432,7 @@ class FragmentFD(FileDownloader):
         if not interrupt_trigger:
             interrupt_trigger = (True, )

+        fragment_retries = self.params.get('fragment_retries', 0)
         is_fatal = (
             ((lambda _: False) if info_dict.get('is_live') else (lambda idx: idx == 0))
             if self.params.get('skip_unavailable_fragments', True) else (lambda _: True))
@@ -452,26 +452,33 @@ class FragmentFD(FileDownloader):
                 headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)

             # Never skip the first fragment
-            fatal = is_fatal(fragment.get('index') or (frag_index - 1))
-
-            def error_callback(err, count, retries):
-                if fatal and count > retries:
-                    ctx['dest_stream'].close()
-                self.report_retry(err, count, retries, frag_index, fatal)
-                ctx['last_error'] = err
-
-            for retry in RetryManager(self.params.get('fragment_retries'), error_callback):
+            fatal, count = is_fatal(fragment.get('index') or (frag_index - 1)), 0
+            while count <= fragment_retries:
                 try:
                     ctx['fragment_count'] = fragment.get('fragment_count')
-                    if not self._download_fragment(ctx, fragment['url'], info_dict, headers):
-                        return
+                    if self._download_fragment(ctx, fragment['url'], info_dict, headers):
+                        break
+                    return
                 except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
-                    retry.error = err
-                    continue
-                except DownloadError:  # has own retry settings
-                    if fatal:
-                        raise
+                    # Unavailable (possibly temporary) fragments may be served.
+                    # First we try to retry then either skip or abort.
+                    # See https://github.com/ytdl-org/youtube-dl/issues/10165,
+                    # https://github.com/ytdl-org/youtube-dl/issues/10448).
+                    count += 1
+                    ctx['last_error'] = err
+                    if count <= fragment_retries:
+                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
+                except DownloadError:
+                    # Don't retry fragment if error occurred during HTTP downloading
+                    # itself since it has own retry settings
+                    if not fatal:
+                        break
+                    raise

+            if count > fragment_retries and fatal:
+                ctx['dest_stream'].close()
+                self.report_error('Giving up after %s fragment retries' % fragment_retries)

         def append_fragment(frag_content, frag_index, ctx):
             if frag_content:
                 self._append_fragment(ctx, pack_func(frag_content, frag_index))
@@ -9,7 +9,6 @@ import urllib.error
 from .common import FileDownloader
 from ..utils import (
     ContentTooShortError,
-    RetryManager,
     ThrottledDownload,
     XAttrMetadataError,
     XAttrUnavailableError,
@@ -73,6 +72,9 @@ class HttpFD(FileDownloader):

         ctx.is_resume = ctx.resume_len > 0

+        count = 0
+        retries = self.params.get('retries', 0)
+
         class SucceedDownload(Exception):
             pass

@@ -347,7 +349,9 @@ class HttpFD(FileDownloader):

             if data_len is not None and byte_counter != data_len:
                 err = ContentTooShortError(byte_counter, int(data_len))
-                retry(err)
+                if count <= retries:
+                    retry(err)
+                raise err

             self.try_rename(ctx.tmpfilename, ctx.filename)

@@ -366,20 +370,24 @@ class HttpFD(FileDownloader):

             return True

-        for retry in RetryManager(self.params.get('retries'), self.report_retry):
+        while count <= retries:
             try:
                 establish_connection()
                 return download()
-            except RetryDownload as err:
-                retry.error = err.source_error
+            except RetryDownload as e:
+                count += 1
+                if count <= retries:
+                    self.report_retry(e.source_error, count, retries)
+                else:
+                    self.to_screen(f'[download] Got server HTTP error: {e.source_error}')
                 continue
             except NextFragment:
-                retry.error = None
-                retry.attempt -= 1
                 continue
             except SucceedDownload:
                 return True
             except:  # noqa: E722
                 close_stream()
                 raise
+
+        self.report_error('giving up after %s retries' % retries)
         return False
@@ -5,7 +5,6 @@ import time
 import urllib.error

 from .fragment import FragmentFD
-from ..utils import RetryManager

 u8 = struct.Struct('>B')
 u88 = struct.Struct('>Bx')
@@ -246,6 +245,7 @@ class IsmFD(FragmentFD):
             'ism_track_written': False,
         })

+        fragment_retries = self.params.get('fragment_retries', 0)
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

         frag_index = 0
@@ -253,10 +253,8 @@ class IsmFD(FragmentFD):
             frag_index += 1
             if frag_index <= ctx['fragment_index']:
                 continue
-            retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry,
-                                         frag_index=frag_index, fatal=not skip_unavailable_fragments)
-            for retry in retry_manager:
+            count = 0
+            while count <= fragment_retries:
                 try:
                     success = self._download_fragment(ctx, segment['url'], info_dict)
                     if not success:
@@ -269,14 +267,18 @@ class IsmFD(FragmentFD):
                         write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
                         extra_state['ism_track_written'] = True
                     self._append_fragment(ctx, frag_content)
+                    break
                 except urllib.error.HTTPError as err:
-                    retry.error = err
-                    continue
-
-            if retry_manager.error:
-                if not skip_unavailable_fragments:
-                    return False
+                    count += 1
+                    if count <= fragment_retries:
+                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
+            if count > fragment_retries:
+                if skip_unavailable_fragments:
                     self.report_skip_fragment(frag_index)
+                    continue
+                self.report_error('giving up after %s fragment retries' % fragment_retries)
+                return False

         self._finish_frag_download(ctx, info_dict)

         return True
@@ -4,7 +4,6 @@ import re
 import uuid

 from .fragment import FragmentFD
-from ..compat import imghdr
 from ..utils import escapeHTML, formatSeconds, srt_subtitles_timecode, urljoin
 from ..version import __version__ as YT_DLP_VERSION

@@ -167,13 +166,21 @@ body > figure > img {
                 continue
             frag_content = self._read_fragment(ctx)
+
+            mime_type = b'image/jpeg'
+            if frag_content.startswith(b'\x89PNG\r\n\x1a\n'):
+                mime_type = b'image/png'
+            if frag_content.startswith((b'GIF87a', b'GIF89a')):
+                mime_type = b'image/gif'
+            if frag_content.startswith(b'RIFF') and frag_content[8:12] == b'WEBP':
+                mime_type = b'image/webp'
+
             frag_header = io.BytesIO()
             frag_header.write(
                 b'--%b\r\n' % frag_boundary.encode('us-ascii'))
             frag_header.write(
                 b'Content-ID: <%b>\r\n' % self._gen_cid(i, fragment, frag_boundary).encode('us-ascii'))
             frag_header.write(
-                b'Content-type: %b\r\n' % f'image/{imghdr.what(h=frag_content) or "jpeg"}'.encode())
+                b'Content-type: %b\r\n' % mime_type)
             frag_header.write(
                 b'Content-length: %u\r\n' % len(frag_content))
             frag_header.write(
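The restored branch replaces the `imghdr` probe with inline magic-number checks; the same sniffing as a tiny standalone function:

    def sniff_image_mime(data):
        if data.startswith(b'\x89PNG\r\n\x1a\n'):
            return b'image/png'
        if data.startswith((b'GIF87a', b'GIF89a')):
            return b'image/gif'
        if data.startswith(b'RIFF') and data[8:12] == b'WEBP':
            return b'image/webp'
        return b'image/jpeg'  # default, matching the code above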
@ -1,4 +1,3 @@
|
|||||||
import asyncio
|
|
||||||
import contextlib
|
import contextlib
|
||||||
import os
|
import os
|
||||||
import signal
|
import signal
|
||||||
@ -6,6 +5,7 @@ import threading
|
|||||||
|
|
||||||
from .common import FileDownloader
|
from .common import FileDownloader
|
||||||
from .external import FFmpegFD
|
from .external import FFmpegFD
|
||||||
|
from ..compat import asyncio
|
||||||
from ..dependencies import websockets
|
from ..dependencies import websockets
|
||||||
|
|
||||||
|
|
||||||
|
@@ -3,13 +3,7 @@ import time
 import urllib.error

 from .fragment import FragmentFD
-from ..utils import (
-    RegexNotFoundError,
-    RetryManager,
-    dict_get,
-    int_or_none,
-    try_get,
-)
+from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get


 class YoutubeLiveChatFD(FragmentFD):
@@ -22,6 +16,7 @@ class YoutubeLiveChatFD(FragmentFD):
         self.report_warning('Live chat download runs until the livestream ends. '
                             'If you wish to download the video simultaneously, run a separate yt-dlp instance')

+        fragment_retries = self.params.get('fragment_retries', 0)
         test = self.params.get('test', False)

         ctx = {
@@ -109,7 +104,8 @@ class YoutubeLiveChatFD(FragmentFD):
             return continuation_id, live_offset, click_tracking_params

         def download_and_parse_fragment(url, frag_index, request_data=None, headers=None):
-            for retry in RetryManager(self.params.get('fragment_retries'), self.report_retry, frag_index=frag_index):
+            count = 0
+            while count <= fragment_retries:
                 try:
                     success = dl_fragment(url, request_data, headers)
                     if not success:
@@ -124,14 +120,20 @@ class YoutubeLiveChatFD(FragmentFD):
                     live_chat_continuation = try_get(
                         data,
                         lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}
-
-                    func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
-                            or frag_index == 1 and try_refresh_replay_beginning
-                            or parse_actions_replay)
-                    return (True, *func(live_chat_continuation))
+                    if info_dict['protocol'] == 'youtube_live_chat_replay':
+                        if frag_index == 1:
+                            continuation_id, offset, click_tracking_params = try_refresh_replay_beginning(live_chat_continuation)
+                        else:
+                            continuation_id, offset, click_tracking_params = parse_actions_replay(live_chat_continuation)
+                    elif info_dict['protocol'] == 'youtube_live_chat':
+                        continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
+                    return True, continuation_id, offset, click_tracking_params
                 except urllib.error.HTTPError as err:
-                    retry.error = err
-                    continue
+                    count += 1
+                    if count <= fragment_retries:
+                        self.report_retry_fragment(err, frag_index, count, fragment_retries)
+            if count > fragment_retries:
+                self.report_error('giving up after %s fragment retries' % fragment_retries)
             return False, None, None, None

         self._prepare_and_start_frag_download(ctx, info_dict)
@@ -60,7 +60,6 @@ from .americastestkitchen import (
     AmericasTestKitchenIE,
     AmericasTestKitchenSeasonIE,
 )
-from .angel import AngelIE
 from .animeondemand import AnimeOnDemandIE
 from .anvato import AnvatoIE
 from .aol import AolIE
@@ -219,7 +218,6 @@ from .camdemy import (
     CamdemyFolderIE
 )
 from .cammodels import CamModelsIE
-from .camtasia import CamtasiaEmbedIE
 from .camwithher import CamWithHerIE
 from .canalalpha import CanalAlphaIE
 from .canalplus import CanalplusIE
@@ -410,7 +408,6 @@ from .dplay import (
     DiscoveryLifeIE,
     AnimalPlanetIE,
     TLCIE,
-    MotorTrendIE,
     DiscoveryPlusIndiaIE,
     DiscoveryNetworksDeIE,
     DiscoveryPlusItalyIE,
@@ -446,7 +443,7 @@ from .dw import (
     DWIE,
     DWArticleIE,
 )
-from .eagleplatform import EaglePlatformIE, ClipYouEmbedIE
+from .eagleplatform import EaglePlatformIE
 from .ebaumsworld import EbaumsWorldIE
 from .echomsk import EchoMskIE
 from .egghead import (
@@ -631,7 +628,6 @@ from .gronkh import (
     GronkhVodsIE
 )
 from .groupon import GrouponIE
-from .harpodeon import HarpodeonIE
 from .hbo import HBOIE
 from .hearthisat import HearThisAtIE
 from .heise import HeiseIE
@@ -644,7 +640,6 @@ from .hidive import HiDiveIE
 from .historicfilms import HistoricFilmsIE
 from .hitbox import HitboxIE, HitboxLiveIE
 from .hitrecord import HitRecordIE
-from .holodex import HolodexIE
 from .hotnewhiphop import HotNewHipHopIE
 from .hotstar import (
     HotStarIE,
@@ -663,7 +658,6 @@ from .hse import (
     HSEShowIE,
     HSEProductIE,
 )
-from .genericembeds import HTML5MediaEmbedIE
 from .huajiao import HuajiaoIE
 from .huya import HuyaLiveIE
 from .huffpost import HuffPostIE
@@ -766,7 +760,6 @@ from .kicker import KickerIE
 from .kickstarter import KickStarterIE
 from .kinja import KinjaEmbedIE
 from .kinopoisk import KinoPoiskIE
-from .kompas import KompasVideoIE
 from .konserthusetplay import KonserthusetPlayIE
 from .koo import KooIE
 from .kth import KTHIE
@@ -1212,8 +1205,19 @@ from .openrec import (
 from .ora import OraTVIE
 from .orf import (
     ORFTVthekIE,
+    ORFFM4IE,
     ORFFM4StoryIE,
-    ORFRadioIE,
+    ORFOE1IE,
+    ORFOE3IE,
+    ORFNOEIE,
+    ORFWIEIE,
+    ORFBGLIE,
+    ORFOOEIE,
+    ORFSTMIE,
+    ORFKTNIE,
+    ORFSBGIE,
+    ORFTIRIE,
+    ORFVBGIE,
     ORFIPTVIE,
 )
 from .outsidetv import OutsideTVIE
@@ -1236,10 +1240,11 @@ from .paramountplus import (
     ParamountPlusIE,
     ParamountPlusSeriesIE,
 )
+from .parliamentliveuk import ParliamentLiveUKIE
 from .parlview import ParlviewIE
 from .patreon import (
     PatreonIE,
-    PatreonCampaignIE
+    PatreonUserIE
 )
 from .pbs import PBSIE
 from .pearvideo import PearVideoIE
@@ -1386,8 +1391,6 @@ from .rai import (
     RaiPlaySoundIE,
     RaiPlaySoundLiveIE,
     RaiPlaySoundPlaylistIE,
-    RaiNewsIE,
-    RaiSudtirolIE,
     RaiIE,
 )
 from .raywenderlich import (
@@ -1406,7 +1409,6 @@ from .rcti import (
     RCTIPlusTVIE,
 )
 from .rds import RDSIE
-from .redbee import ParliamentLiveUKIE, RTBFIE
 from .redbulltv import (
     RedBullTVIE,
     RedBullEmbedIE,
@@ -1440,6 +1442,7 @@ from .rokfin import (
 from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE
 from .rottentomatoes import RottenTomatoesIE
 from .rozhlas import RozhlasIE
+from .rtbf import RTBFIE
 from .rte import RteIE, RteRadioIE
 from .rtlnl import (
     RtlNlIE,
@@ -1545,7 +1548,6 @@ from .shared import (
     SharedIE,
     VivoIE,
 )
-from .sharevideos import ShareVideosEmbedIE
 from .shemaroome import ShemarooMeIE
 from .showroomlive import ShowRoomLiveIE
 from .simplecast import (
@@ -1723,7 +1725,6 @@ from .telequebec import (
 )
 from .teletask import TeleTaskIE
 from .telewebion import TelewebionIE
-from .tempo import TempoIE
 from .tennistv import TennisTVIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
@@ -1,6 +1,5 @@
 import base64
 import binascii
-import functools
 import hashlib
 import hmac
 import io
@@ -21,11 +20,11 @@ from ..utils import (
     decode_base_n,
     int_or_none,
     intlist_to_bytes,
-    OnDemandPagedList,
     request_to_url,
     time_seconds,
     traverse_obj,
     update_url_query,
+    urljoin,
 )

 # NOTE: network handler related code is temporary thing until network stack overhaul PRs are merged (#2861/#2862)
||||||
@ -146,106 +145,17 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
|
|||||||
|
|
||||||
|
|
||||||
class AbemaTVBaseIE(InfoExtractor):
|
class AbemaTVBaseIE(InfoExtractor):
|
||||||
_USERTOKEN = None
|
|
||||||
_DEVICE_ID = None
|
|
||||||
_MEDIATOKEN = None
|
|
||||||
|
|
||||||
_SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def _generate_aks(cls, deviceid):
|
|
||||||
deviceid = deviceid.encode('utf-8')
|
|
||||||
# add 1 hour and then drop minute and secs
|
|
||||||
ts_1hour = int((time_seconds(hours=9) // 3600 + 1) * 3600)
|
|
||||||
time_struct = time.gmtime(ts_1hour)
|
|
||||||
ts_1hour_str = str(ts_1hour).encode('utf-8')
|
|
||||||
|
|
||||||
tmp = None
|
|
||||||
|
|
||||||
def mix_once(nonce):
|
|
||||||
nonlocal tmp
|
|
||||||
h = hmac.new(cls._SECRETKEY, digestmod=hashlib.sha256)
|
|
||||||
h.update(nonce)
|
|
||||||
tmp = h.digest()
|
|
||||||
|
|
||||||
def mix_tmp(count):
|
|
||||||
nonlocal tmp
|
|
||||||
for i in range(count):
|
|
||||||
mix_once(tmp)
|
|
||||||
|
|
||||||
def mix_twist(nonce):
|
|
||||||
nonlocal tmp
|
|
||||||
mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)
|
|
||||||
|
|
||||||
mix_once(cls._SECRETKEY)
|
|
||||||
mix_tmp(time_struct.tm_mon)
|
|
||||||
mix_twist(deviceid)
|
|
||||||
mix_tmp(time_struct.tm_mday % 5)
|
|
||||||
mix_twist(ts_1hour_str)
|
|
||||||
mix_tmp(time_struct.tm_hour % 5)
|
|
||||||
|
|
||||||
return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')
|
|
||||||
|
|
||||||
def _get_device_token(self):
|
|
||||||
if self._USERTOKEN:
|
|
||||||
return self._USERTOKEN
|
|
||||||
|
|
||||||
AbemaTVBaseIE._DEVICE_ID = str(uuid.uuid4())
|
|
||||||
aks = self._generate_aks(self._DEVICE_ID)
|
|
||||||
user_data = self._download_json(
|
|
||||||
'https://api.abema.io/v1/users', None, note='Authorizing',
|
|
||||||
data=json.dumps({
|
|
||||||
'deviceId': self._DEVICE_ID,
|
|
||||||
'applicationKeySecret': aks,
|
|
||||||
}).encode('utf-8'),
|
|
||||||
headers={
|
|
||||||
'Content-Type': 'application/json',
|
|
||||||
})
|
|
||||||
AbemaTVBaseIE._USERTOKEN = user_data['token']
|
|
||||||
|
|
||||||
# don't allow adding it 2 times or more, though it's guarded
|
|
||||||
remove_opener(self._downloader, AbemaLicenseHandler)
|
|
||||||
add_opener(self._downloader, AbemaLicenseHandler(self))
|
|
||||||
|
|
||||||
return self._USERTOKEN
|
|
||||||
|
|
||||||
def _get_media_token(self, invalidate=False, to_show=True):
|
|
||||||
if not invalidate and self._MEDIATOKEN:
|
|
||||||
return self._MEDIATOKEN
|
|
||||||
|
|
||||||
AbemaTVBaseIE._MEDIATOKEN = self._download_json(
|
|
||||||
'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
|
|
||||||
query={
|
|
||||||
'osName': 'android',
|
|
||||||
'osVersion': '6.0.1',
|
|
||||||
'osLang': 'ja_JP',
|
|
||||||
'osTimezone': 'Asia/Tokyo',
|
|
||||||
'appId': 'tv.abema',
|
|
||||||
'appVersion': '3.27.1'
|
|
||||||
}, headers={
|
|
||||||
'Authorization': f'bearer {self._get_device_token()}',
|
|
||||||
})['token']
|
|
||||||
|
|
||||||
return self._MEDIATOKEN
|
|
||||||
|
|
||||||
def _call_api(self, endpoint, video_id, query=None, note='Downloading JSON metadata'):
|
|
||||||
return self._download_json(
|
|
||||||
f'https://api.abema.io/{endpoint}', video_id, query=query or {},
|
|
||||||
note=note,
|
|
||||||
headers={
|
|
||||||
'Authorization': f'bearer {self._get_device_token()}',
|
|
||||||
})
|
|
||||||
|
|
||||||
def _extract_breadcrumb_list(self, webpage, video_id):
|
def _extract_breadcrumb_list(self, webpage, video_id):
|
||||||
for jld in re.finditer(
|
for jld in re.finditer(
|
||||||
r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
|
r'(?is)</span></li></ul><script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
|
||||||
webpage):
|
webpage):
|
||||||
jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
|
jsonld = self._parse_json(jld.group('json_ld'), video_id, fatal=False)
|
||||||
if traverse_obj(jsonld, '@type') != 'BreadcrumbList':
|
if jsonld:
|
||||||
|
if jsonld.get('@type') != 'BreadcrumbList':
|
||||||
continue
|
continue
|
||||||
items = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
|
trav = traverse_obj(jsonld, ('itemListElement', ..., 'name'))
|
||||||
if items:
|
if trav:
|
||||||
return items
|
return trav
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
||||||
@@ -297,7 +207,87 @@ class AbemaTVIE(AbemaTVBaseIE):
         },
         'skip': 'Not supported until yt-dlp implements native live downloader OR AbemaTV can start a local HTTP server',
     }]
+    _USERTOKEN = None
+    _DEVICE_ID = None
     _TIMETABLE = None
+    _MEDIATOKEN = None
+
+    _SECRETKEY = b'v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9BRbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$k9cD=3TxwWe86!x#Zyhe'
+
+    def _generate_aks(self, deviceid):
+        deviceid = deviceid.encode('utf-8')
+        # add 1 hour and then drop minute and secs
+        ts_1hour = int((time_seconds(hours=9) // 3600 + 1) * 3600)
+        time_struct = time.gmtime(ts_1hour)
+        ts_1hour_str = str(ts_1hour).encode('utf-8')
+
+        tmp = None
+
+        def mix_once(nonce):
+            nonlocal tmp
+            h = hmac.new(self._SECRETKEY, digestmod=hashlib.sha256)
+            h.update(nonce)
+            tmp = h.digest()
+
+        def mix_tmp(count):
+            nonlocal tmp
+            for i in range(count):
+                mix_once(tmp)
+
+        def mix_twist(nonce):
+            nonlocal tmp
+            mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)
+
+        mix_once(self._SECRETKEY)
+        mix_tmp(time_struct.tm_mon)
+        mix_twist(deviceid)
+        mix_tmp(time_struct.tm_mday % 5)
+        mix_twist(ts_1hour_str)
+        mix_tmp(time_struct.tm_hour % 5)
+
+        return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')
+
+    def _get_device_token(self):
+        if self._USERTOKEN:
+            return self._USERTOKEN
+
+        self._DEVICE_ID = str(uuid.uuid4())
+        aks = self._generate_aks(self._DEVICE_ID)
+        user_data = self._download_json(
+            'https://api.abema.io/v1/users', None, note='Authorizing',
+            data=json.dumps({
+                'deviceId': self._DEVICE_ID,
+                'applicationKeySecret': aks,
+            }).encode('utf-8'),
+            headers={
+                'Content-Type': 'application/json',
+            })
+        self._USERTOKEN = user_data['token']
+
+        # don't allow adding it 2 times or more, though it's guarded
+        remove_opener(self._downloader, AbemaLicenseHandler)
+        add_opener(self._downloader, AbemaLicenseHandler(self))
+
+        return self._USERTOKEN
+
+    def _get_media_token(self, invalidate=False, to_show=True):
+        if not invalidate and self._MEDIATOKEN:
+            return self._MEDIATOKEN
+
+        self._MEDIATOKEN = self._download_json(
+            'https://api.abema.io/v1/media/token', None, note='Fetching media token' if to_show else False,
+            query={
+                'osName': 'android',
+                'osVersion': '6.0.1',
+                'osLang': 'ja_JP',
+                'osTimezone': 'Asia/Tokyo',
+                'appId': 'tv.abema',
+                'appVersion': '3.27.1'
+            }, headers={
+                'Authorization': 'bearer ' + self._get_device_token()
+            })['token']
+
+        return self._MEDIATOKEN
+
     def _perform_login(self, username, password):
         if '@' in username:  # don't strictly check if it's email address or not
@@ -311,13 +301,13 @@ class AbemaTVIE(AbemaTVBaseIE):
             method: username,
             'password': password
         }).encode('utf-8'), headers={
||||||
'Authorization': f'bearer {self._get_device_token()}',
|
'Authorization': 'bearer ' + self._get_device_token(),
|
||||||
'Origin': 'https://abema.tv',
|
'Origin': 'https://abema.tv',
|
||||||
'Referer': 'https://abema.tv/',
|
'Referer': 'https://abema.tv/',
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
})
|
})
|
||||||
|
|
||||||
AbemaTVBaseIE._USERTOKEN = login_response['token']
|
self._USERTOKEN = login_response['token']
|
||||||
self._get_media_token(True)
|
self._get_media_token(True)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
@ -452,7 +442,6 @@ class AbemaTVIE(AbemaTVBaseIE):
|
|||||||
|
|
||||||
class AbemaTVTitleIE(AbemaTVBaseIE):
|
class AbemaTVTitleIE(AbemaTVBaseIE):
|
||||||
_VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
|
_VALID_URL = r'https?://abema\.tv/video/title/(?P<id>[^?/]+)'
|
||||||
_PAGE_SIZE = 25
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://abema.tv/video/title/90-1597',
|
'url': 'https://abema.tv/video/title/90-1597',
|
||||||
@ -468,39 +457,18 @@ class AbemaTVTitleIE(AbemaTVBaseIE):
|
|||||||
'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
|
'title': '真心が届く~僕とスターのオフィス・ラブ!?~',
|
||||||
},
|
},
|
||||||
'playlist_mincount': 16,
|
'playlist_mincount': 16,
|
||||||
}, {
|
|
||||||
'url': 'https://abema.tv/video/title/25-102',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '25-102',
|
|
||||||
'title': 'ソードアート・オンライン アリシゼーション',
|
|
||||||
},
|
|
||||||
'playlist_mincount': 24,
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _fetch_page(self, playlist_id, series_version, page):
|
|
||||||
programs = self._call_api(
|
|
||||||
f'v1/video/series/{playlist_id}/programs', playlist_id,
|
|
||||||
note=f'Downloading page {page + 1}',
|
|
||||||
query={
|
|
||||||
'seriesVersion': series_version,
|
|
||||||
'offset': str(page * self._PAGE_SIZE),
|
|
||||||
'order': 'seq',
|
|
||||||
'limit': str(self._PAGE_SIZE),
|
|
||||||
})
|
|
||||||
yield from (
|
|
||||||
self.url_result(f'https://abema.tv/video/episode/{x}')
|
|
||||||
for x in traverse_obj(programs, ('programs', ..., 'id'), default=[]))
|
|
||||||
|
|
||||||
def _entries(self, playlist_id, series_version):
|
|
||||||
return OnDemandPagedList(
|
|
||||||
functools.partial(self._fetch_page, playlist_id, series_version),
|
|
||||||
self._PAGE_SIZE)
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
playlist_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
series_info = self._call_api(f'v1/video/series/{playlist_id}', playlist_id)
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
return self.playlist_result(
|
playlist_title, breadcrumb = None, self._extract_breadcrumb_list(webpage, video_id)
|
||||||
self._entries(playlist_id, series_info['version']), playlist_id=playlist_id,
|
if breadcrumb:
|
||||||
playlist_title=series_info.get('title'),
|
playlist_title = breadcrumb[-1]
|
||||||
playlist_description=series_info.get('content'))
|
|
||||||
|
playlist = [
|
||||||
|
self.url_result(urljoin('https://abema.tv/', mobj.group(1)))
|
||||||
|
for mobj in re.finditer(r'<li\s*class=".+?EpisodeList.+?"><a\s*href="(/[^"]+?)"', webpage)]
|
||||||
|
|
||||||
|
return self.playlist_result(playlist, playlist_title=playlist_title, playlist_id=video_id)
|
||||||
|
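
The `_generate_aks` helper that this compare moves between `AbemaTVBaseIE` and `AbemaTVIE` is a chained-HMAC derivation: every `mix_*` call re-keys a running SHA-256 HMAC with the device ID and an hour-aligned JST timestamp, so the resulting application key secret only changes once per hour. Below is a minimal standalone sketch of the same scheme, assuming a placeholder secret (the real `_SECRETKEY` is the byte string in the hunk above) and approximating `time_seconds(hours=9)` with a fixed UTC+9 offset:

import base64
import hashlib
import hmac
import time
import uuid

SECRET = b'placeholder-secret'  # stand-in for the real _SECRETKEY shown in the hunk above


def generate_aks(device_id, secret=SECRET, now=None):
    # hour-aligned JST timestamp, mirroring int((time_seconds(hours=9) // 3600 + 1) * 3600)
    ts_1hour = int(((now or time.time()) + 9 * 3600) // 3600 + 1) * 3600
    tm = time.gmtime(ts_1hour)
    tmp = b''

    def mix_once(nonce):
        # each round replaces the running digest, always keyed by the static secret
        nonlocal tmp
        tmp = hmac.new(secret, nonce, hashlib.sha256).digest()

    def mix_tmp(count):
        for _ in range(count):
            mix_once(tmp)

    def mix_twist(nonce):
        mix_once(base64.urlsafe_b64encode(tmp).rstrip(b'=') + nonce)

    mix_once(secret)
    mix_tmp(tm.tm_mon)
    mix_twist(device_id.encode('utf-8'))
    mix_tmp(tm.tm_mday % 5)
    mix_twist(str(ts_1hour).encode('utf-8'))
    mix_tmp(tm.tm_hour % 5)
    return base64.urlsafe_b64encode(tmp).rstrip(b'=').decode('utf-8')


print(generate_aks(str(uuid.uuid4())))
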
@@ -232,7 +232,6 @@ class AdobeTVChannelIE(AdobeTVPlaylistBaseIE):
 class AdobeTVVideoIE(AdobeTVBaseIE):
     IE_NAME = 'adobetv:video'
     _VALID_URL = r'https?://video\.tv\.adobe\.com/v/(?P<id>\d+)'
-    _EMBED_REGEX = [r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]']

     _TEST = {
         # From https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners
@@ -1,56 +0,0 @@
-import re
-
-from .common import InfoExtractor
-from ..utils import url_or_none, merge_dicts
-
-
-class AngelIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?angel\.com/watch/(?P<series>[^/?#]+)/episode/(?P<id>[\w-]+)/season-(?P<season_number>\d+)/episode-(?P<episode_number>\d+)/(?P<title>[^/?#]+)'
-    _TESTS = [{
-        'url': 'https://www.angel.com/watch/tuttle-twins/episode/2f3d0382-ea82-4cdc-958e-84fbadadc710/season-1/episode-1/when-laws-give-you-lemons',
-        'md5': '4734e5cfdd64a568e837246aa3eaa524',
-        'info_dict': {
-            'id': '2f3d0382-ea82-4cdc-958e-84fbadadc710',
-            'ext': 'mp4',
-            'title': 'Tuttle Twins Season 1, Episode 1: When Laws Give You Lemons',
-            'description': 'md5:73b704897c20ab59c433a9c0a8202d5e',
-            'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
-            'duration': 1359.0
-        }
-    }, {
-        'url': 'https://www.angel.com/watch/the-chosen/episode/8dfb714d-bca5-4812-8125-24fb9514cd10/season-1/episode-1/i-have-called-you-by-name',
-        'md5': 'e4774bad0a5f0ad2e90d175cafdb797d',
-        'info_dict': {
-            'id': '8dfb714d-bca5-4812-8125-24fb9514cd10',
-            'ext': 'mp4',
-            'title': 'The Chosen Season 1, Episode 1: I Have Called You By Name',
-            'description': 'md5:aadfb4827a94415de5ff6426e6dee3be',
-            'thumbnail': r're:^https?://images.angelstudios.com/image/upload/angel-app/.*$',
-            'duration': 3276.0
-        }
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-
-        json_ld = self._search_json_ld(webpage, video_id)
-
-        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
-            json_ld.pop('url'), video_id, note='Downloading HD m3u8 information')
-
-        info_dict = {
-            'id': video_id,
-            'title': self._og_search_title(webpage),
-            'description': self._og_search_description(webpage),
-            'formats': formats,
-            'subtitles': subtitles
-        }
-
-        # Angel uses cloudinary in the background and supports image transformations.
-        # We remove these transformations and return the source file
-        base_thumbnail_url = url_or_none(self._og_search_thumbnail(webpage)) or json_ld.pop('thumbnails')
-        if base_thumbnail_url:
-            info_dict['thumbnail'] = re.sub(r'(/upload)/.+(/angel-app/.+)$', r'\1\2', base_thumbnail_url)
-
-        return merge_dicts(info_dict, json_ld)
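
The deleted extractor's thumbnail handling is a plain `re.sub`: everything between `/upload` and `/angel-app/` is a Cloudinary transformation segment, and dropping it yields the source image. A quick check with a made-up URL (the transformation string is hypothetical; only the URL shape matters):

import re

thumb = 'https://images.angelstudios.com/image/upload/c_fill,w_300/angel-app/poster.jpg'
source = re.sub(r'(/upload)/.+(/angel-app/.+)$', r'\1\2', thumb)
assert source == 'https://images.angelstudios.com/image/upload/angel-app/poster.jpg'
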
@@ -1,3 +1,4 @@
+import re
 import urllib.parse

 from .common import InfoExtractor
@@ -6,6 +7,7 @@ from ..utils import (
     ExtractorError,
     determine_ext,
     scale_thumbnails_to_max_format_width,
+    unescapeHTML,
 )


@@ -89,7 +91,7 @@ class Ant1NewsGrArticleIE(Ant1NewsGrBaseIE):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         info = self._search_json_ld(webpage, video_id, expected_type='NewsArticle')
-        embed_urls = list(Ant1NewsGrEmbedIE._extract_embed_urls(url, webpage))
+        embed_urls = list(Ant1NewsGrEmbedIE._extract_urls(webpage))
         if not embed_urls:
             raise ExtractorError('no videos found for %s' % video_id, expected=True)
         return self.playlist_from_matches(
@@ -102,7 +104,6 @@ class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE):
     IE_DESC = 'ant1news.gr embedded videos'
     _BASE_PLAYER_URL_RE = r'(?:https?:)?//(?:[a-zA-Z0-9\-]+\.)?(?:antenna|ant1news)\.gr/templates/pages/player'
     _VALID_URL = rf'{_BASE_PLAYER_URL_RE}\?([^#]+&)?cid=(?P<id>[^#&]+)'
-    _EMBED_REGEX = [rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{_BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+)(?P=_q1)']
     _API_PATH = '/news/templates/data/jsonPlayer'

     _TESTS = [{
@@ -116,6 +117,16 @@ class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE):
         },
     }]

+    @classmethod
+    def _extract_urls(cls, webpage):
+        _EMBED_URL_RE = rf'{cls._BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+'
+        _EMBED_RE = rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{_EMBED_URL_RE})(?P=_q1)'
+        for mobj in re.finditer(_EMBED_RE, webpage):
+            url = unescapeHTML(mobj.group('url'))
+            if not cls.suitable(url):
+                continue
+            yield url
+
     def _real_extract(self, url):
         video_id = self._match_id(url)

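
Both sides of this hunk build the iframe matcher the same way: capture whichever quote opens the `src` attribute, use a tempered dot (`(?:(?!(?P=_q1)).)+`) so the URL match stops at that same quote, and HTML-unescape the result. A small self-contained demonstration of the trick, with a simplified stand-in regex and a made-up page:

import re
from html import unescape

# simplified stand-in for the player matcher in the hunk above
EMBED_RE = r'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>(?:(?!(?P=_q1)).)+)(?P=_q1)'

page = '<iframe src="//www.antenna.gr/templates/pages/player?cid=abc&amp;w=640"></iframe>'
for mobj in re.finditer(EMBED_RE, page):
    # &amp; in the attribute becomes & in the usable URL
    print(unescape(mobj.group('url')))
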
@@ -340,16 +340,30 @@ class AnvatoIE(InfoExtractor):
             'subtitles': subtitles,
         }

-    @classmethod
-    def _extract_from_webpage(cls, url, webpage):
-        for mobj in re.finditer(cls._ANVP_RE, webpage):
-            anvplayer_data = unescapeHTML(json.loads(mobj.group('anvp'))) or {}
-            video_id, access_key = anvplayer_data.get('video'), anvplayer_data.get('accessKey')
-            if not access_key:
-                access_key = cls._MCP_TO_ACCESS_KEY_TABLE.get((anvplayer_data.get('mcp') or '').lower())
-            if not (video_id or '').isdigit() or not access_key:
+    @staticmethod
+    def _extract_urls(ie, webpage, video_id):
+        entries = []
+        for mobj in re.finditer(AnvatoIE._ANVP_RE, webpage):
+            anvplayer_data = ie._parse_json(
+                mobj.group('anvp'), video_id, transform_source=unescapeHTML,
+                fatal=False)
+            if not anvplayer_data:
                 continue
-            yield cls.url_result(f'anvato:{access_key}:{video_id}', AnvatoIE, video_id)
+            video = anvplayer_data.get('video')
+            if not isinstance(video, compat_str) or not video.isdigit():
+                continue
+            access_key = anvplayer_data.get('accessKey')
+            if not access_key:
+                mcp = anvplayer_data.get('mcp')
+                if mcp:
+                    access_key = AnvatoIE._MCP_TO_ACCESS_KEY_TABLE.get(
+                        mcp.lower())
+                if not access_key:
+                    continue
+            entries.append(ie.url_result(
+                'anvato:%s:%s' % (access_key, video), ie=AnvatoIE.ie_key(),
+                video_id=video))
+        return entries

     def _extract_anvato_videos(self, webpage, video_id):
         anvplayer_data = self._parse_json(
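
Either version of `AnvatoIE`'s embed scanner resolves an access key in two steps: take `accessKey` from the inline `anvp` JSON if present, otherwise look the lowercased `mcp` code up in `_MCP_TO_ACCESS_KEY_TABLE`. A reduced sketch of that resolution logic (the blob and table contents here are illustrative, not the real values):

import json
from html import unescape

MCP_TO_ACCESS_KEY_TABLE = {'lin': 'example-lin-access-key'}  # illustrative subset


def resolve_anvato_url(anvp_blob):
    data = json.loads(unescape(anvp_blob))
    # the video id is expected as a digit string, matching the checks in the hunk
    video_id = data.get('video')
    access_key = data.get('accessKey') or MCP_TO_ACCESS_KEY_TABLE.get(
        (data.get('mcp') or '').lower())
    if not (video_id or '').isdigit() or not access_key:
        return None
    return f'anvato:{access_key}:{video_id}'


print(resolve_anvato_url('{"video": "123456", "mcp": "LIN"}'))  # anvato:example-lin-access-key:123456
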
@@ -1,3 +1,5 @@
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
@@ -8,7 +10,6 @@ from ..utils import (

 class APAIE(InfoExtractor):
     _VALID_URL = r'(?P<base_url>https?://[^/]+\.apa\.at)/embed/(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
-    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1']
     _TESTS = [{
         'url': 'http://uvp.apa.at/embed/293f6d17-692a-44e3-9fd5-7b178f3a1029',
         'md5': '2b12292faeb0a7d930c778c7a5b4759b',
@@ -29,6 +30,14 @@ class APAIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//[^/]+\.apa\.at/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}.*?)\1',
+                webpage)]
+
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id, base_url = mobj.group('id', 'base_url')
@@ -10,7 +10,6 @@ from ..utils import (

 class AparatIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
-    _EMBED_REGEX = [r'<iframe .*?src="(?P<url>http://www\.aparat\.com/video/[^"]+)"']

     _TESTS = [{
         'url': 'http://www.aparat.com/v/wP8On',
@@ -49,11 +49,6 @@ class ArchiveOrgIE(InfoExtractor):
             'upload_date': '20100315',
             'creator': 'SRI International',
             'uploader': 'laura@archive.org',
-            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
-            'release_year': 1968,
-            'display_id': 'XD300-23_68HighlightsAResearchCntAugHumanIntellect.cdr',
-            'track': 'XD300-23 68HighlightsAResearchCntAugHumanIntellect',
-
         },
     }, {
         'url': 'https://archive.org/details/Cops1922',
@@ -62,43 +57,33 @@ class ArchiveOrgIE(InfoExtractor):
             'id': 'Cops1922',
             'ext': 'mp4',
             'title': 'Buster Keaton\'s "Cops" (1922)',
-            'description': 'md5:cd6f9910c35aedd5fc237dbc3957e2ca',
+            'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
             'uploader': 'yorkmba99@hotmail.com',
             'timestamp': 1387699629,
             'upload_date': '20131222',
-            'display_id': 'Cops-v2.mp4',
-            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
-            'duration': 1091.96,
         },
     }, {
         'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
         'only_matching': True,
     }, {
         'url': 'https://archive.org/details/Election_Ads',
-        'md5': 'eec5cddebd4793c6a653b69c3b11f2e6',
+        'md5': '284180e857160cf866358700bab668a3',
         'info_dict': {
             'id': 'Election_Ads/Commercial-JFK1960ElectionAdCampaignJingle.mpg',
             'title': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
-            'ext': 'mpg',
-            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
-            'duration': 59.77,
-            'display_id': 'Commercial-JFK1960ElectionAdCampaignJingle.mpg',
+            'ext': 'mp4',
         },
     }, {
         'url': 'https://archive.org/details/Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
-        'md5': 'ea1eed8234e7d4165f38c8c769edef38',
+        'md5': '7915213ef02559b5501fe630e1a53f59',
         'info_dict': {
             'id': 'Election_Ads/Commercial-Nixon1960ElectionAdToughonDefense.mpg',
             'title': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
-            'ext': 'mpg',
+            'ext': 'mp4',
             'timestamp': 1205588045,
             'uploader': 'mikedavisstripmaster@yahoo.com',
             'description': '1960 Presidential Campaign Election Commercials John F Kennedy, Richard M Nixon',
             'upload_date': '20080315',
-            'display_id': 'Commercial-Nixon1960ElectionAdToughonDefense.mpg',
-            'duration': 59.51,
-            'license': 'http://creativecommons.org/licenses/publicdomain/',
-            'thumbnail': r're:https://archive\.org/download/.*\.jpg',
         },
     }, {
         'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16',
@@ -107,12 +92,6 @@ class ArchiveOrgIE(InfoExtractor):
             'id': 'gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t01.flac',
             'title': 'Turning',
             'ext': 'flac',
-            'track': 'Turning',
-            'creator': 'Grateful Dead',
-            'display_id': 'gd1977-05-08d01t01.flac',
-            'track_number': 1,
-            'album': '1977-05-08 - Barton Hall - Cornell University',
-            'duration': 39.8,
         },
     }, {
         'url': 'https://archive.org/details/gd1977-05-08.shure57.stevenson.29303.flac16/gd1977-05-08d01t07.flac',
@@ -123,20 +102,11 @@ class ArchiveOrgIE(InfoExtractor):
             'ext': 'flac',
             'timestamp': 1205895624,
             'uploader': 'mvernon54@yahoo.com',
-            'description': 'md5:6c921464414814720c6593810a5c7e3d',
+            'description': 'md5:6a31f1996db0aa0fc9da6d6e708a1bb0',
             'upload_date': '20080319',
             'location': 'Barton Hall - Cornell University',
-            'duration': 438.68,
-            'track': 'Deal',
-            'creator': 'Grateful Dead',
-            'album': '1977-05-08 - Barton Hall - Cornell University',
-            'release_date': '19770508',
-            'display_id': 'gd1977-05-08d01t07.flac',
-            'release_year': 1977,
-            'track_number': 7,
         },
     }, {
-        # FIXME: give a better error message than just IndexError when all available formats are restricted
         'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik',
         'md5': '7cb019baa9b332e82ea7c10403acd180',
         'info_dict': {
@@ -144,7 +114,6 @@ class ArchiveOrgIE(InfoExtractor):
             'title': 'Bells Of Rostov',
             'ext': 'mp3',
         },
-        'skip': 'restricted'
     }, {
         'url': 'https://archive.org/details/lp_the-music-of-russia_various-artists-a-askaryan-alexander-melik/disc1/02.02.+Song+And+Chorus+In+The+Polovetsian+Camp+From+%22Prince+Igor%22+(Act+2%2C+Scene+1).mp3',
         'md5': '1d0aabe03edca83ca58d9ed3b493a3c3',
@@ -157,52 +126,6 @@ class ArchiveOrgIE(InfoExtractor):
             'description': 'md5:012b2d668ae753be36896f343d12a236',
             'upload_date': '20190928',
         },
-        'skip': 'restricted'
-    }, {
-        # Original formats are private
-        'url': 'https://archive.org/details/irelandthemakingofarepublic',
-        'info_dict': {
-            'id': 'irelandthemakingofarepublic',
-            'title': 'Ireland: The Making of a Republic',
-            'upload_date': '20160610',
-            'description': 'md5:f70956a156645a658a0dc9513d9e78b7',
-            'uploader': 'dimitrios@archive.org',
-            'creator': ['British Broadcasting Corporation', 'Time-Life Films'],
-            'timestamp': 1465594947,
-        },
-        'playlist': [
-            {
-                'md5': '0b211261b26590d49df968f71b90690d',
-                'info_dict': {
-                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_01.mov',
-                    'ext': 'mp4',
-                    'title': 'irelandthemakingofarepublicreel1_01.mov',
-                    'duration': 130.46,
-                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_01_000117.jpg',
-                    'display_id': 'irelandthemakingofarepublicreel1_01.mov',
-                },
-            }, {
-                'md5': '67335ee3b23a0da930841981c1e79b02',
-                'info_dict': {
-                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel1_02.mov',
-                    'ext': 'mp4',
-                    'duration': 1395.13,
-                    'title': 'irelandthemakingofarepublicreel1_02.mov',
-                    'display_id': 'irelandthemakingofarepublicreel1_02.mov',
-                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel1_02_001374.jpg',
-                },
-            }, {
-                'md5': 'e470e86787893603f4a341a16c281eb5',
-                'info_dict': {
-                    'id': 'irelandthemakingofarepublic/irelandthemakingofarepublicreel2.mov',
-                    'ext': 'mp4',
-                    'duration': 1602.67,
-                    'title': 'irelandthemakingofarepublicreel2.mov',
-                    'thumbnail': 'https://archive.org/download/irelandthemakingofarepublic/irelandthemakingofarepublic.thumbs/irelandthemakingofarepublicreel2_001554.jpg',
-                    'display_id': 'irelandthemakingofarepublicreel2.mov',
-                },
-            }
-        ]
     }]

     @staticmethod
@@ -293,25 +216,17 @@ class ArchiveOrgIE(InfoExtractor):
                    'filesize': int_or_none(f.get('size'))})

                extension = (f['name'].rsplit('.', 1) + [None])[1]
-
-                # We don't want to skip private formats if the user has access to them,
-                # however without access to an account with such privileges we can't implement/test this.
-                # For now to be safe, we will only skip them if there is no user logged in.
-                is_logged_in = bool(self._get_cookies('https://archive.org').get('logged-in-sig'))
-                if extension in KNOWN_EXTENSIONS and (not f.get('private') or is_logged_in):
+                if extension in KNOWN_EXTENSIONS:
                    entry['formats'].append({
                        'url': 'https://archive.org/download/' + identifier + '/' + f['name'],
                        'format': f.get('format'),
                        'width': int_or_none(f.get('width')),
                        'height': int_or_none(f.get('height')),
                        'filesize': int_or_none(f.get('size')),
-                        'protocol': 'https',
-                        'source_preference': 0 if f.get('source') == 'original' else -1,
-                        'format_note': f.get('source')
-                    })
+                        'protocol': 'https'})

        for entry in entries.values():
-            self._sort_formats(entry['formats'], ('source', ))
+            self._sort_formats(entry['formats'])

        if len(entries) == 1:
            # If there's only one item, use it as the main info dict
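
The left-hand side of the last hunk makes two policy decisions worth spelling out: files marked private are only listed when an archive.org session cookie is present, and original sources are ranked above derived re-encodes via `source_preference`. A reduced sketch of that filter, assuming `KNOWN_EXTENSIONS` from `yt_dlp.utils` (a real import) and illustrative file dicts:

from yt_dlp.utils import KNOWN_EXTENSIONS


def playable_formats(identifier, files, logged_in):
    formats = []
    for f in files:
        extension = (f['name'].rsplit('.', 1) + [None])[1]
        # skip unplayable extensions, and private files for anonymous users
        if extension not in KNOWN_EXTENSIONS or (f.get('private') and not logged_in):
            continue
        formats.append({
            'url': f'https://archive.org/download/{identifier}/{f["name"]}',
            # originals sort above derived versions of the same resolution
            'source_preference': 0 if f.get('source') == 'original' else -1,
            'format_note': f.get('source'),
        })
    return formats


print(playable_formats('Cops1922', [{'name': 'Cops-v2.mp4', 'source': 'original', 'private': True}], logged_in=False))
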
@@ -70,8 +70,8 @@ class ArcPublishingIE(InfoExtractor):
         ], 'video-api-cdn.%s.arcpublishing.com/api'),
     ]

-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
+    @staticmethod
+    def _extract_urls(webpage):
         entries = []
         # https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
         for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
@@ -1,3 +1,5 @@
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
@@ -17,8 +19,6 @@ class ArkenaIE(InfoExtractor):
                         play\.arkena\.com/(?:config|embed)/avp/v\d/player/media/(?P<id>[^/]+)/[^/]+/(?P<account_id>\d+)
                     )
                     '''
-    # See https://support.arkena.com/display/PLAY/Ways+to+embed+your+video
-    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//play\.arkena\.com/embed/avp/.+?)\1']
     _TESTS = [{
         'url': 'https://video.qbrick.com/play2/embed/player?accountId=1034090&mediaId=d8ab4607-00090107-aab86310',
         'md5': '97f117754e5f3c020f5f26da4a44ebaf',
@@ -50,6 +50,15 @@ class ArkenaIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_url(webpage):
+        # See https://support.arkena.com/display/PLAY/Ways+to+embed+your+video
+        mobj = re.search(
+            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//play\.arkena\.com/embed/avp/.+?)\1',
+            webpage)
+        if mobj:
+            return mobj.group('url')
+
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
@@ -1,210 +1,190 @@
 import re

 from .common import InfoExtractor
+from ..compat import (
+    compat_str,
+)
 from ..utils import (
     ExtractorError,
-    GeoRestrictedError,
     int_or_none,
-    parse_iso8601,
     parse_qs,
+    qualities,
     strip_or_none,
-    traverse_obj,
+    try_get,
+    unified_strdate,
     url_or_none,
 )


 class ArteTVBaseIE(InfoExtractor):
     _ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
-    _API_BASE = 'https://api.arte.tv/api/player/v2'
+    _API_BASE = 'https://api.arte.tv/api/player/v1'


 class ArteTVIE(ArteTVBaseIE):
     _VALID_URL = r'''(?x)
-                    (?:https?://
+                    https?://
                         (?:
                             (?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos|
                             api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s)
                         )
-                    |arte://program)
-                        /(?P<id>\d{6}-\d{3}-[AF]|LIVE)
+                        /(?P<id>\d{6}-\d{3}-[AF])
                     ''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES}
     _TESTS = [{
         'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
-        'only_matching': True,
+        'info_dict': {
+            'id': '088501-000-A',
+            'ext': 'mp4',
+            'title': 'Mexico: Stealing Petrol to Survive',
+            'upload_date': '20190628',
+        },
     }, {
         'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
-        'info_dict': {
-            'id': '100103-000-A',
-            'title': 'USA: Dyskryminacja na porodówce',
-            'description': 'md5:242017b7cce59ffae340a54baefcafb1',
-            'alt_title': 'ARTE Reportage',
-            'upload_date': '20201103',
-            'duration': 554,
-            'thumbnail': r're:https://api-cdn\.arte\.tv/.+940x530',
-            'timestamp': 1604417980,
-            'ext': 'mp4',
-        },
-        'params': {'skip_download': 'm3u8'}
-    }, {
-        'note': 'No alt_title',
-        'url': 'https://www.arte.tv/fr/videos/110371-000-A/la-chaleur-supplice-des-arbres-de-rue/',
-        'info_dict': {
-            'id': '110371-000-A',
-            'ext': 'mp4',
-            'upload_date': '20220718',
-            'duration': 154,
-            'timestamp': 1658162460,
-            'description': 'md5:5890f36fe7dccfadb8b7c0891de54786',
-            'title': 'La chaleur, supplice des arbres de rue',
-            'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/CPE2sQDtD8GLQgt8DuYHLf/940x530',
-        },
-        'params': {'skip_download': 'm3u8'}
+        'only_matching': True,
     }, {
         'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
         'only_matching': True,
-    }, {
-        'url': 'https://api.arte.tv/api/player/v2/config/de/LIVE',
-        'only_matching': True,
     }]

-    _GEO_BYPASS = True
-
-    _LANG_MAP = {  # ISO639 -> French abbreviations
-        'fr': 'F',
-        'de': 'A',
-        'en': 'E[ANG]',
-        'es': 'E[ESP]',
-        'it': 'E[ITA]',
-        'pl': 'E[POL]',
-        # XXX: probably means mixed; <https://www.arte.tv/en/videos/107710-029-A/dispatches-from-ukraine-local-journalists-report/>
-        # uses this code for audio that happens to be in Ukrainian, but the manifest uses the ISO code 'mul' (mixed)
-        'mul': 'EU',
-    }
-
-    _VERSION_CODE_RE = re.compile(r'''(?x)
-        V
-        (?P<original_voice>O?)
-        (?P<vlang>[FA]|E\[[A-Z]+\]|EU)?
-        (?P<audio_desc>AUD|)
-        (?:
-            (?P<has_sub>-ST)
-            (?P<sdh_sub>M?)
-            (?P<sub_lang>[FA]|E\[[A-Z]+\]|EU)
-        )?
-        ''')
-
-    # all obtained by exhaustive testing
-    _COUNTRIES_MAP = {
-        'DE_FR': {
-            'BL', 'DE', 'FR', 'GF', 'GP', 'MF', 'MQ', 'NC',
-            'PF', 'PM', 'RE', 'WF', 'YT',
-        },
-        # with both of the below 'BE' sometimes works, sometimes doesn't
-        'EUR_DE_FR': {
-            'AT', 'BL', 'CH', 'DE', 'FR', 'GF', 'GP', 'LI',
-            'MC', 'MF', 'MQ', 'NC', 'PF', 'PM', 'RE', 'WF',
-            'YT',
-        },
-        'SAT': {
-            'AD', 'AT', 'AX', 'BG', 'BL', 'CH', 'CY', 'CZ',
-            'DE', 'DK', 'EE', 'ES', 'FI', 'FR', 'GB', 'GF',
-            'GR', 'HR', 'HU', 'IE', 'IS', 'IT', 'KN', 'LI',
-            'LT', 'LU', 'LV', 'MC', 'MF', 'MQ', 'MT', 'NC',
-            'NL', 'NO', 'PF', 'PL', 'PM', 'PT', 'RE', 'RO',
-            'SE', 'SI', 'SK', 'SM', 'VA', 'WF', 'YT',
-        },
-    }
-
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
         lang = mobj.group('lang') or mobj.group('lang_2')
-        langauge_code = self._LANG_MAP.get(lang)
-
-        config = self._download_json(f'{self._API_BASE}/config/{lang}/{video_id}', video_id)
-
-        geoblocking = traverse_obj(config, ('data', 'attributes', 'restriction', 'geoblocking')) or {}
-        if geoblocking.get('restrictedArea'):
-            raise GeoRestrictedError(f'Video restricted to {geoblocking["code"]!r}',
-                                     countries=self._COUNTRIES_MAP.get(geoblocking['code'], ('DE', 'FR')))
-
-        if not traverse_obj(config, ('data', 'attributes', 'rights')):
-            # Eg: https://www.arte.tv/de/videos/097407-215-A/28-minuten
-            # Eg: https://www.arte.tv/es/videos/104351-002-A/serviteur-du-peuple-1-23
-            raise ExtractorError(
-                'Video is not available in this language edition of Arte or broadcast rights expired', expected=True)
-
-        formats, subtitles = [], {}
-        for stream in config['data']['attributes']['streams']:
-            # official player contains code like `e.get("versions")[0].eStat.ml5`
-            stream_version = stream['versions'][0]
-            stream_version_code = stream_version['eStat']['ml5']
-
-            lang_pref = -1
-            m = self._VERSION_CODE_RE.match(stream_version_code)
-            if m:
-                lang_pref = int(''.join('01'[x] for x in (
-                    m.group('vlang') == langauge_code,  # we prefer voice in the requested language
-                    not m.group('audio_desc'),  # and not the audio description version
-                    bool(m.group('original_voice')),  # but if voice is not in the requested language, at least choose the original voice
-                    m.group('sub_lang') == langauge_code,  # if subtitles are present, we prefer them in the requested language
-                    not m.group('has_sub'),  # but we prefer no subtitles otherwise
-                    not m.group('sdh_sub'),  # and we prefer not the hard-of-hearing subtitles if there are subtitles
-                )))
-
-            if stream['protocol'].startswith('HLS'):
-                fmts, subs = self._extract_m3u8_formats_and_subtitles(
-                    stream['url'], video_id=video_id, ext='mp4', m3u8_id=stream_version_code, fatal=False)
-                for fmt in fmts:
-                    fmt.update({
-                        'format_note': f'{stream_version.get("label", "unknown")} [{stream_version.get("shortLabel", "?")}]',
-                        'language_preference': lang_pref,
-                    })
-                formats.extend(fmts)
-                self._merge_subtitles(subs, target=subtitles)
-
-            elif stream['protocol'] in ('HTTPS', 'RTMP'):
-                formats.append({
-                    'format_id': f'{stream["protocol"]}-{stream_version_code}',
-                    'url': stream['url'],
-                    'format_note': f'{stream_version.get("label", "unknown")} [{stream_version.get("shortLabel", "?")}]',
-                    'language_preference': lang_pref,
-                    # 'ext': 'mp4',  # XXX: may or may not be necessary, at least for HTTPS
-                })
+        info = self._download_json(
+            '%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id)
+        player_info = info['videoJsonPlayer']
+
+        vsr = try_get(player_info, lambda x: x['VSR'], dict)
+        if not vsr:
+            error = None
+            if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
+                error = try_get(
+                    player_info, lambda x: x['custom_msg']['msg'], compat_str)
+            if not error:
+                error = 'Video %s is not available' % player_info.get('VID') or video_id
+            raise ExtractorError(error, expected=True)
+
+        upload_date_str = player_info.get('shootingDate')
+        if not upload_date_str:
+            upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
+
+        title = (player_info.get('VTI') or player_info['VID']).strip()
+        subtitle = player_info.get('VSU', '').strip()
+        if subtitle:
+            title += ' - %s' % subtitle
+
+        qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ'])
+
+        LANGS = {
+            'fr': 'F',
+            'de': 'A',
+            'en': 'E[ANG]',
+            'es': 'E[ESP]',
+            'it': 'E[ITA]',
+            'pl': 'E[POL]',
+        }
+        langcode = LANGS.get(lang, lang)
+
+        formats = []
+        for format_id, format_dict in vsr.items():
+            f = dict(format_dict)
+            format_url = url_or_none(f.get('url'))
+            streamer = f.get('streamer')
+            if not format_url and not streamer:
+                continue
+            versionCode = f.get('versionCode')
+            l = re.escape(langcode)
+
+            # Language preference from most to least priority
+            # Reference: section 6.8 of
+            # https://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-07-1.pdf
+            PREFERENCES = (
+                # original version in requested language, without subtitles
+                r'VO{0}$'.format(l),
+                # original version in requested language, with partial subtitles in requested language
+                r'VO{0}-ST{0}$'.format(l),
+                # original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
+                r'VO{0}-STM{0}$'.format(l),
+                # non-original (dubbed) version in requested language, without subtitles
+                r'V{0}$'.format(l),
+                # non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language
+                r'V{0}-ST{0}$'.format(l),
+                # non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
+                r'V{0}-STM{0}$'.format(l),
+                # original version in requested language, with partial subtitles in different language
+                r'VO{0}-ST(?!{0}).+?$'.format(l),
+                # original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
+                r'VO{0}-STM(?!{0}).+?$'.format(l),
+                # original version in different language, with partial subtitles in requested language
+                r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
+                # original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
+                r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
+                # original version in different language, without subtitles
+                r'VO(?:(?!{0}))?$'.format(l),
+                # original version in different language, with partial subtitles in different language
+                r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
+                # original version in different language, with subtitles for the deaf and hard-of-hearing in different language
+                r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
+            )
+
+            for pref, p in enumerate(PREFERENCES):
+                if re.match(p, versionCode):
+                    lang_pref = len(PREFERENCES) - pref
+                    break
             else:
-                self.report_warning(f'Skipping stream with unknown protocol {stream["protocol"]}')
-
-            # TODO: chapters from stream['segments']?
-            # The JS also looks for chapters in config['data']['attributes']['chapters'],
-            # but I am yet to find a video having those
-
-        self._sort_formats(formats)
-
-        metadata = config['data']['attributes']['metadata']
-
+                lang_pref = -1
+            format_note = '%s, %s' % (f.get('versionCode'), f.get('versionLibelle'))
+
+            media_type = f.get('mediaType')
+            if media_type == 'hls':
+                m3u8_formats = self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                    m3u8_id=format_id, fatal=False)
+                for m3u8_format in m3u8_formats:
+                    m3u8_format.update({
+                        'language_preference': lang_pref,
+                        'format_note': format_note,
+                    })
+                formats.extend(m3u8_formats)
+                continue
+
+            format = {
+                'format_id': format_id,
+                'language_preference': lang_pref,
+                'format_note': format_note,
+                'width': int_or_none(f.get('width')),
+                'height': int_or_none(f.get('height')),
+                'tbr': int_or_none(f.get('bitrate')),
+                'quality': qfunc(f.get('quality')),
+            }
+
+            if media_type == 'rtmp':
+                format['url'] = f['streamer']
+                format['play_path'] = 'mp4:' + f['url']
+                format['ext'] = 'flv'
+            else:
+                format['url'] = f['url']
+
+            formats.append(format)
+
+        # For this extractor, quality only represents the relative quality
+        # with respect to other formats with the same resolution
+        self._sort_formats(formats, ('res', 'quality'))
+
         return {
-            'id': metadata['providerId'],
-            'webpage_url': traverse_obj(metadata, ('link', 'url')),
-            'title': traverse_obj(metadata, 'subtitle', 'title'),
-            'alt_title': metadata.get('subtitle') and metadata.get('title'),
-            'description': metadata.get('description'),
-            'duration': traverse_obj(metadata, ('duration', 'seconds')),
-            'language': metadata.get('language'),
-            'timestamp': traverse_obj(config, ('data', 'attributes', 'rights', 'begin'), expected_type=parse_iso8601),
-            'is_live': config['data']['attributes'].get('live', False),
+            'id': player_info.get('VID') or video_id,
+            'title': title,
+            'description': player_info.get('VDE') or player_info.get('V7T'),
+            'upload_date': unified_strdate(upload_date_str),
+            'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
             'formats': formats,
-            'subtitles': subtitles,
-            'thumbnails': [
-                {'url': image['url'], 'id': image.get('caption')}
-                for image in metadata.get('images') or [] if url_or_none(image.get('url'))
-            ],
         }


 class ArteTVEmbedIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
-    _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1']
     _TESTS = [{
         'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
         'info_dict': {
@@ -214,12 +194,17 @@ class ArteTVEmbedIE(InfoExtractor):
             'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
             'upload_date': '20201116',
         },
-        'skip': 'No video available'
     }, {
         'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [url for _, url in re.findall(
+            r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1',
+            webpage)]
+
     def _real_extract(self, url):
         qs = parse_qs(url)
         json_url = qs['json_url'][0]
@@ -232,36 +217,44 @@ class ArteTVPlaylistIE(ArteTVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>RC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES
     _TESTS = [{
         'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
-        'only_matching': True,
+        'info_dict': {
+            'id': 'RC-016954',
+            'title': 'Earn a Living',
+            'description': 'md5:d322c55011514b3a7241f7fb80d494c2',
+        },
+        'playlist_mincount': 6,
     }, {
         'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
-        'playlist_mincount': 100,
-        'info_dict': {
-            'description': 'md5:84e7bf1feda248bc325ebfac818c476e',
-            'id': 'RC-014123',
-            'title': 'ARTE Reportage - najlepsze reportaże',
-        },
+        'only_matching': True,
     }]

     def _real_extract(self, url):
-        lang, playlist_id = self._match_valid_url(url).group('lang', 'id')
-        playlist = self._download_json(
-            f'{self._API_BASE}/playlist/{lang}/{playlist_id}', playlist_id)['data']['attributes']
-
-        entries = [{
-            '_type': 'url_transparent',
-            'url': video['config']['url'],
-            'ie_key': ArteTVIE.ie_key(),
-            'id': video.get('providerId'),
-            'title': video.get('title'),
-            'alt_title': video.get('subtitle'),
-            'thumbnail': url_or_none(traverse_obj(video, ('mainImage', 'url'))),
-            'duration': int_or_none(traverse_obj(video, ('duration', 'seconds'))),
-        } for video in traverse_obj(playlist, ('items', lambda _, v: v['config']['url']))]
-
-        return self.playlist_result(entries, playlist_id,
-                                    traverse_obj(playlist, ('metadata', 'title')),
-                                    traverse_obj(playlist, ('metadata', 'description')))
+        lang, playlist_id = self._match_valid_url(url).groups()
+        collection = self._download_json(
+            '%s/collectionData/%s/%s?source=videos'
+            % (self._API_BASE, lang, playlist_id), playlist_id)
+        entries = []
+        for video in collection['videos']:
+            if not isinstance(video, dict):
+                continue
+            video_url = url_or_none(video.get('url')) or url_or_none(video.get('jsonUrl'))
+            if not video_url:
+                continue
+            video_id = video.get('programId')
+            entries.append({
+                '_type': 'url_transparent',
+                'url': video_url,
+                'id': video_id,
+                'title': video.get('title'),
+                'alt_title': video.get('subtitle'),
+                'thumbnail': url_or_none(try_get(video, lambda x: x['mainImage']['url'], compat_str)),
+                'duration': int_or_none(video.get('durationSeconds')),
+                'view_count': int_or_none(video.get('views')),
+                'ie_key': ArteTVIE.ie_key(),
+            })
+        title = collection.get('title')
+        description = collection.get('shortDescription') or collection.get('teaserText')
+        return self.playlist_result(entries, playlist_id, title, description)


 class ArteTVCategoryIE(ArteTVBaseIE):
@@ -274,13 +267,14 @@ class ArteTVCategoryIE(ArteTVBaseIE):
             'description': 'Investigative documentary series, geopolitical analysis, and international commentary',
         },
         'playlist_mincount': 13,
-    }]
+    },
+]

     @classmethod
     def suitable(cls, url):
         return (
             not any(ie.suitable(url) for ie in (ArteTVIE, ArteTVPlaylistIE, ))
-            and super().suitable(url))
+            and super(ArteTVCategoryIE, cls).suitable(url))

     def _real_extract(self, url):
         lang, playlist_id = self._match_valid_url(url).groups()
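
The left-hand side of the `arte.py` rewrite replaces the ordered `PREFERENCES` regex ladder with a packed digit string: `_VERSION_CODE_RE` splits the version code, and `int(''.join('01'[x] for x in (...)))` turns the ordered booleans into one comparable integer in which earlier conditions become more significant digits. A self-contained sketch of that ranking, with the regex copied from the hunk and illustrative sample codes:

import re

VERSION_CODE_RE = re.compile(r'''(?x)
    V
    (?P<original_voice>O?)
    (?P<vlang>[FA]|E\[[A-Z]+\]|EU)?
    (?P<audio_desc>AUD|)
    (?:
        (?P<has_sub>-ST)
        (?P<sdh_sub>M?)
        (?P<sub_lang>[FA]|E\[[A-Z]+\]|EU)
    )?
    ''')


def language_preference(version_code, langcode):
    # pack ordered booleans into a comparable integer: '01'[True] == '1',
    # and earlier tuple entries land in more significant decimal digits
    m = VERSION_CODE_RE.match(version_code)
    if not m:
        return -1
    return int(''.join('01'[x] for x in (
        m.group('vlang') == langcode,     # voice in the requested language
        not m.group('audio_desc'),        # not the audio-description track
        bool(m.group('original_voice')),  # otherwise at least the original voice
        m.group('sub_lang') == langcode,  # subtitles, if any, in the requested language
        not m.group('has_sub'),           # but prefer no subtitles at all
        not m.group('sdh_sub'),           # and non-SDH subtitles over SDH ones
    )))


# original French voice with French subtitles outranks a dubbed SDH variant
assert language_preference('VOF-STF', 'F') > language_preference('VA-STMF', 'F')
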
@@ -22,7 +22,6 @@ from ..utils import (

 class BandcampIE(InfoExtractor):
     _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<id>[^/?#&]+)'
-    _EMBED_REGEX = [r'<meta property="og:url"[^>]*?content="(?P<url>.*?bandcamp\.com.*?)"']
     _TESTS = [{
         'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
         'md5': 'c557841d5e50261777a6585648adf439',
|
@ -46,7 +46,6 @@ class BBCCoUkIE(InfoExtractor):
|
|||||||
)
|
)
|
||||||
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
|
(?P<id>%s)(?!/(?:episodes|broadcasts|clips))
|
||||||
''' % _ID_REGEX
|
''' % _ID_REGEX
|
||||||
_EMBED_REGEX = [r'setPlaylist\("(?P<url>https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)']
|
|
||||||
|
|
||||||
_LOGIN_URL = 'https://account.bbc.com/signin'
|
_LOGIN_URL = 'https://account.bbc.com/signin'
|
||||||
_NETRC_MACHINE = 'bbc'
|
_NETRC_MACHINE = 'bbc'
|
||||||
@ -1232,7 +1231,7 @@ class BBCIE(BBCCoUkIE):
|
|||||||
(lambda x: x['data']['blocks'],
|
(lambda x: x['data']['blocks'],
|
||||||
lambda x: x['data']['content']['model']['blocks'],),
|
lambda x: x['data']['content']['model']['blocks'],),
|
||||||
list) or []):
|
list) or []):
|
||||||
if block.get('type') not in ['media', 'video']:
|
if block.get('type') != 'media':
|
||||||
continue
|
continue
|
||||||
parse_media(block.get('model'))
|
parse_media(block.get('model'))
|
||||||
return self.playlist_result(
|
return self.playlist_result(
|
||||||
|
@@ -13,7 +13,6 @@ from ..utils import (

 class BitChuteIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?bitchute\.com/(?:video|embed|torrent/[^/]+)/(?P<id>[^/?#&]+)'
-    _EMBED_REGEX = [rf'<(?:script|iframe)[^>]+\bsrc=(["\'])(?P<url>{_VALID_URL})']
     _TESTS = [{
         'url': 'https://www.bitchute.com/video/UGlrF9o9b-Q/',
         'md5': '7e427d7ed7af5a75b5855705ec750e2b',
@@ -34,6 +33,14 @@ class BitChuteIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'<(?:script|iframe)[^>]+\bsrc=(["\'])(?P<url>%s)' % BitChuteIE._VALID_URL,
+                webpage)]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)

@@ -1,3 +1,5 @@
+import re
+
 from ..utils import (
     mimetype2ext,
     parse_duration,
@@ -11,7 +13,7 @@ from .common import InfoExtractor
 class BloggerIE(InfoExtractor):
     IE_NAME = 'blogger.com'
     _VALID_URL = r'https?://(?:www\.)?blogger\.com/video\.g\?token=(?P<id>.+)'
-    _EMBED_REGEX = [r'''<iframe[^>]+src=["'](?P<url>(?:https?:)?//(?:www\.)?blogger\.com/video\.g\?token=[^"']+)["']''']
+    _VALID_EMBED = r'''<iframe[^>]+src=["']((?:https?:)?//(?:www\.)?blogger\.com/video\.g\?token=[^"']+)["']'''
     _TESTS = [{
         'url': 'https://www.blogger.com/video.g?token=AD6v5dzEe9hfcARr5Hlq1WTkYy6t-fXH3BBahVhGvVHe5szdEUBEloSEDSTA8-b111089KbfWuBvTN7fnbxMtymsHhXAXwVvyzHH4Qch2cfLQdGxKQrrEuFpC1amSl_9GuLWODjPgw',
         'md5': 'f1bc19b6ea1b0fd1d81e84ca9ec467ac',
@@ -24,6 +26,10 @@ class BloggerIE(InfoExtractor):
         }
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(BloggerIE._VALID_EMBED, webpage)
+
     def _real_extract(self, url):
         token_id = self._match_id(url)
         webpage = self._download_webpage(url, token_id)
@@ -402,11 +402,11 @@ class BrightcoveNewIE(AdobePassIE):

     @staticmethod
     def _extract_url(ie, webpage):
-        urls = BrightcoveNewIE._extract_brightcove_urls(ie, webpage)
+        urls = BrightcoveNewIE._extract_urls(ie, webpage)
         return urls[0] if urls else None

     @staticmethod
-    def _extract_brightcove_urls(ie, webpage):
+    def _extract_urls(ie, webpage):
         # Reference:
         # 1. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#setvideoiniframe
         # 2. http://docs.brightcove.com/en/video-cloud/brightcove-player/guides/publish-video.html#tag
@@ -81,7 +81,7 @@ class BuzzFeedIE(InfoExtractor):
                 continue
             entries.append(self.url_result(video['url']))

-        facebook_urls = FacebookIE._extract_embed_urls(url, webpage)
+        facebook_urls = FacebookIE._extract_urls(webpage)
         entries.extend([
             self.url_result(facebook_url)
             for facebook_url in facebook_urls])
@@ -1,71 +0,0 @@
-import os
-import urllib.parse
-
-from .common import InfoExtractor
-from ..utils import float_or_none
-
-
-class CamtasiaEmbedIE(InfoExtractor):
-    _VALID_URL = False
-    _WEBPAGE_TESTS = [
-        {
-            'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
-            'playlist': [{
-                'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
-                'info_dict': {
-                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
-                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
-                    'ext': 'flv',
-                    'duration': 2235.90,
-                }
-            }, {
-                'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
-                'info_dict': {
-                    'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
-                    'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
-                    'ext': 'flv',
-                    'duration': 2235.93,
-                }
-            }],
-            'info_dict': {
-                'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
-            },
-            'skip': 'webpage dead'
-        },
-
-    ]
-
-    def _extract_from_webpage(self, url, webpage):
-        camtasia_cfg = self._search_regex(
-            r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
-            webpage, 'camtasia configuration file', default=None)
-        if camtasia_cfg is None:
-            return None
-
-        title = self._html_search_meta('DC.title', webpage, fatal=True)
-
-        camtasia_url = urllib.parse.urljoin(url, camtasia_cfg)
-        camtasia_cfg = self._download_xml(
-            camtasia_url, self._generic_id(url),
-            note='Downloading camtasia configuration',
-            errnote='Failed to download camtasia configuration')
-        fileset_node = camtasia_cfg.find('./playlist/array/fileset')
-
-        entries = []
-        for n in fileset_node.getchildren():
-            url_n = n.find('./uri')
-            if url_n is None:
-                continue
-
-            entries.append({
-                'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
-                'title': f'{title} - {n.tag}',
-                'url': urllib.parse.urljoin(url, url_n.text),
-                'duration': float_or_none(n.find('./duration').text),
-            })
-
-        return {
-            '_type': 'playlist',
-            'entries': entries,
-            'title': title,
-        }
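
Note: the deleted CamtasiaEmbedIE above resolves a "csConfigFile" reference from the page, downloads that XML, and builds one playlist entry per file element under ./playlist/array/fileset. A minimal runnable sketch of that walk; the configuration document below is a made-up example shaped after the find() calls in the deleted code:

import xml.etree.ElementTree as ET

CONFIG = '''<config>
  <playlist><array><fileset>
    <file><uri>video1.flv</uri><duration>2235.90</duration></file>
    <file><uri>pip.flv</uri><duration>2235.93</duration></file>
  </fileset></array></playlist>
</config>'''

fileset_node = ET.fromstring(CONFIG).find('./playlist/array/fileset')
for n in list(fileset_node):  # list() replaces getchildren(), removed in Python 3.9
    print(n.find('./uri').text, float(n.find('./duration').text))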
@@ -14,7 +14,6 @@ class Channel9IE(InfoExtractor):
     IE_DESC = 'Channel 9'
     IE_NAME = 'channel9'
     _VALID_URL = r'https?://(?:www\.)?(?:channel9\.msdn\.com|s\.ch9\.ms)/(?P<contentpath>.+?)(?P<rss>/RSS)?/?(?:[?#&]|$)'
-    _EMBED_REGEX = [r'<iframe[^>]+src=["\'](?P<url>https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b']

     _TESTS = [{
         'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',

@@ -79,6 +78,12 @@ class Channel9IE(InfoExtractor):

     _RSS_URL = 'http://channel9.msdn.com/%s/RSS'

+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe[^>]+src=["\'](https?://channel9\.msdn\.com/(?:[^/]+/)+)player\b',
+            webpage)
+
     def _extract_list(self, video_id, rss_url=None):
         if not rss_url:
             rss_url = self._RSS_URL % video_id
@@ -7,8 +7,6 @@ from ..utils import (

 class CinchcastIE(InfoExtractor):
     _VALID_URL = r'https?://player\.cinchcast\.com/.*?(?:assetId|show_id)=(?P<id>[0-9]+)'
-    _EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1']
-
     _TESTS = [{
         'url': 'http://player.cinchcast.com/?show_id=5258197&platformId=1&assetType=single',
         'info_dict': {
@@ -1,4 +1,5 @@
 import base64
+import re

 from .common import InfoExtractor

@@ -15,7 +16,6 @@ class CloudflareStreamIE(InfoExtractor):
                         )
                         (?P<id>%s)
                     ''' % (_DOMAIN_RE, _EMBED_RE, _ID_RE)
-    _EMBED_REGEX = [fr'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//{_EMBED_RE}(?:{_ID_RE}).*?)\1']
     _TESTS = [{
         'url': 'https://embed.cloudflarestream.com/embed/we4g.fla9.latest.js?video=31c9291ab41fac05471db4e73aa11717',
         'info_dict': {

@@ -37,13 +37,21 @@ class CloudflareStreamIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url')
+            for mobj in re.finditer(
+                r'<script[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//%s(?:%s).*?)\1' % (CloudflareStreamIE._EMBED_RE, CloudflareStreamIE._ID_RE),
+                webpage)]
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         domain = 'bytehighway.net' if 'bytehighway.net/' in url else 'videodelivery.net'
         base_url = 'https://%s/%s/' % (domain, video_id)
         if '.' in video_id:
             video_id = self._parse_json(base64.urlsafe_b64decode(
-                video_id.split('.')[1] + '==='), video_id)['sub']
+                video_id.split('.')[1]), video_id)['sub']
         manifest_base_url = base_url + 'manifest/video.'

         formats = self._extract_m3u8_formats(
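
Note: the last CloudflareStream hunk toggles the "+ '==='" on the base64 payload of JWT-style video ids. urlsafe_b64decode() raises binascii.Error when its input length is not a multiple of 4, and JWT segments are transmitted without padding; appending '===' restores at least the padding needed, and CPython's decoder tolerates the excess. A runnable sketch with a made-up token:

import base64
import json

# JWT-style id: header.payload.signature, each segment base64url without padding.
payload = base64.urlsafe_b64encode(json.dumps({'sub': 'abc12'}).encode()).rstrip(b'=').decode()
video_id = f'hh.{payload}.ss'

sub = json.loads(base64.urlsafe_b64decode(video_id.split('.')[1] + '==='))['sub']
print(sub)  # -> abc12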
@@ -11,20 +11,17 @@ import math
 import netrc
 import os
 import random
-import re
 import sys
 import time
-import types
 import urllib.parse
 import urllib.request
 import xml.etree.ElementTree

-from ..compat import functools  # isort: split
+from ..compat import functools, re  # isort: split
 from ..compat import compat_etree_fromstring, compat_expanduser, compat_os_name
 from ..downloader import FileDownloader
 from ..downloader.f4m import get_base_url, remove_encrypted_media
 from ..utils import (
-    IDENTITY,
     JSON_LD_RE,
     NO_DEFAULT,
     ExtractorError,

@@ -32,7 +29,6 @@ from ..utils import (
     GeoUtils,
     LenientJSONDecoder,
     RegexNotFoundError,
-    RetryManager,
     UnsupportedError,
     age_restricted,
     base_url,

@@ -62,7 +58,6 @@ from ..utils import (
     parse_m3u8_attributes,
     parse_resolution,
     sanitize_filename,
-    sanitize_url,
     sanitized_Request,
     str_or_none,
     str_to_int,

@@ -154,7 +149,6 @@ class InfoExtractor:
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
-                   * audio_channels  Number of audio channels
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use

@@ -317,8 +311,7 @@ class InfoExtractor:
                     live stream that goes on instead of a fixed-length video.
     was_live:       True, False, or None (=unknown). Whether this video was
                     originally a live stream.
-    live_status:    None (=unknown), 'is_live', 'is_upcoming', 'was_live', 'not_live',
-                    or 'post_live' (was live, but VOD is not yet processed)
+    live_status:    'is_live', 'is_upcoming', 'was_live', 'not_live' or None (=unknown)
                     If absent, automatically set from is_live, was_live
     start_time:     Time in seconds where the reproduction should start, as
                     specified in the URL.

@@ -336,7 +329,6 @@ class InfoExtractor:
                     'private', 'premium_only', 'subscriber_only', 'needs_auth',
                     'unlisted' or 'public'. Use 'InfoExtractor._availability'
                     to set it
-    _old_archive_ids: A list of old archive ids needed for backward compatibility
     __post_extractor: A function to be called just before the metadata is
                     written to either disk, logger or console. The function
                     must return a dict which will be added to the info_dict.
@@ -437,26 +429,14 @@ class InfoExtractor:
    title, description etc.


-    Subclasses of this should also be added to the list of extractors and
-    should define a _VALID_URL regexp and, re-define the _real_extract() and
-    (optionally) _real_initialize() methods.
+    Subclasses of this should define a _VALID_URL regexp and, re-define the
+    _real_extract() and (optionally) _real_initialize() methods.
+    Probably, they should also be added to the list of extractors.

    Subclasses may also override suitable() if necessary, but ensure the function
    signature is preserved and that this function imports everything it needs
    (except other extractors), so that lazy_extractors works correctly.

-    Subclasses can define a list of _EMBED_REGEX, which will be searched for in
-    the HTML of Generic webpages. It may also override _extract_embed_urls
-    or _extract_from_webpage as necessary. While these are normally classmethods,
-    _extract_from_webpage is allowed to be an instance method.
-
-    _extract_from_webpage may raise self.StopExtraction() to stop further
-    processing of the webpage and obtain exclusive rights to it. This is useful
-    when the extractor cannot reliably be matched using just the URL.
-    Eg: invidious/peertube instances
-
-    Embed-only extractors can be defined by setting _VALID_URL = False.
-
    To support username + password (or netrc) login, the extractor must define a
    _NETRC_MACHINE and re-define _perform_login(username, password) and
    (optionally) _initialize_pre_login() methods. The _perform_login method will

@@ -494,8 +474,6 @@ class InfoExtractor:
     _NETRC_MACHINE = None
     IE_DESC = None
     SEARCH_KEY = None
-    _VALID_URL = None
-    _EMBED_REGEX = []

     def _login_hint(self, method=NO_DEFAULT, netrc=None):
         password_hint = f'--username and --password, or --netrc ({netrc or self._NETRC_MACHINE}) to provide account credentials'

@@ -519,12 +497,12 @@ class InfoExtractor:

     @classmethod
     def _match_valid_url(cls, url):
-        if cls._VALID_URL is False:
-            return None
         # This does not use has/getattr intentionally - we want to know whether
         # we have cached the regexp for *this* class, whereas getattr would also
         # match the superclass
         if '_VALID_URL_RE' not in cls.__dict__:
+            if '_VALID_URL' not in cls.__dict__:
+                cls._VALID_URL = cls._make_valid_url()
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
         return cls._VALID_URL_RE.match(url)

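
Note: both sides of the _match_valid_url() hunk cache the compiled pattern in cls.__dict__ rather than via getattr(), so every subclass compiles and caches its own _VALID_URL instead of silently reusing a parent's cached regexp. A standalone sketch of that caching, with a simplified class hierarchy:

import re

class Base:
    _VALID_URL = None

    @classmethod
    def match(cls, url):
        # cls.__dict__ check: only hits a cache created on *this* class.
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)

class Track(Base):
    _VALID_URL = r'https?://[^/]+\.bandcamp\.com/track/(?P<id>[^/?#&]+)'

print(Track.match('https://artist.bandcamp.com/track/some-song').group('id'))  # -> some-song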
@@ -668,10 +646,10 @@ class InfoExtractor:
                 return None
             if self._x_forwarded_for_ip:
                 ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
-            subtitles = ie_result.get('subtitles') or {}
-            if 'no-live-chat' in self.get_param('compat_opts'):
-                for lang in ('live_chat', 'comments', 'danmaku'):
-                    subtitles.pop(lang, None)
+            subtitles = ie_result.get('subtitles')
+            if (subtitles and 'live_chat' in subtitles
+                    and 'no-live-chat' in self.get_param('compat_opts', [])):
+                del subtitles['live_chat']
             return ie_result
         except GeoRestrictedError as e:
             if self.__maybe_fake_ip_and_retry(e.countries):

@@ -1163,12 +1141,10 @@ class InfoExtractor:
             'url': url,
         }

-    @classmethod
-    def playlist_from_matches(cls, matches, playlist_id=None, playlist_title=None,
-                              getter=IDENTITY, ie=None, video_kwargs=None, **kwargs):
-        return cls.playlist_result(
-            (cls.url_result(m, ie, **(video_kwargs or {})) for m in orderedSet(map(getter, matches), lazy=True)),
-            playlist_id, playlist_title, **kwargs)
+    def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None, video_kwargs=None, **kwargs):
+        urls = (self.url_result(self._proto_relative_url(m), ie, **(video_kwargs or {}))
+                for m in orderedSet(map(getter, matches) if getter else matches))
+        return self.playlist_result(urls, playlist_id, playlist_title, **kwargs)

     @staticmethod
     def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, *, multi_video=False, **kwargs):

@@ -1375,20 +1351,12 @@ class InfoExtractor:
     def _dc_search_uploader(self, html):
         return self._html_search_meta('dc.creator', html, 'uploader')

-    @staticmethod
-    def _rta_search(html):
+    def _rta_search(self, html):
         # See http://www.rtalabel.org/index.php?content=howtofaq#single
         if re.search(r'(?ix)<meta\s+name="rating"\s+'
                      r'     content="RTA-5042-1996-1400-1577-RTA"',
                      html):
             return 18
-
-        # And then there are the jokers who advertise that they use RTA, but actually don't.
-        AGE_LIMIT_MARKERS = [
-            r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
-        ]
-        if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
-            return 18
         return 0

     def _media_rating_search(self, html):
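
Note: _rta_search() above looks for the rtalabel.org meta marker; the newer side of the hunk additionally matches a human-readable "Proudly Labeled ... RTA" link for sites that advertise the label without the meta tag. A runnable sketch of the meta-tag check alone:

import re

def rta_age_limit(html):
    if re.search(r'(?ix)<meta\s+name="rating"\s+content="RTA-5042-1996-1400-1577-RTA"', html):
        return 18
    return 0

print(rta_age_limit('<meta name="rating" content="RTA-5042-1996-1400-1577-RTA">'))  # -> 18
print(rta_age_limit('<p>no label here</p>'))                                        # -> 0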
@@ -1669,7 +1637,7 @@ class InfoExtractor:
         regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'

         default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
-                   'res', 'fps', 'hdr:12', 'channels', 'codec:vp9.2', 'size', 'br', 'asr',
+                   'res', 'fps', 'hdr:12', 'codec:vp9.2', 'size', 'br', 'asr',
                    'proto', 'ext', 'hasaud', 'source', 'id')  # These must not be aliases
         ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
                         'height', 'width', 'proto', 'vext', 'abr', 'aext',

@@ -1705,7 +1673,6 @@ class InfoExtractor:
         'height': {'convert': 'float_none'},
         'width': {'convert': 'float_none'},
         'fps': {'convert': 'float_none'},
-        'channels': {'convert': 'float_none', 'field': 'audio_channels'},
         'tbr': {'convert': 'float_none'},
         'vbr': {'convert': 'float_none'},
         'abr': {'convert': 'float_none'},

@@ -1719,14 +1686,13 @@ class InfoExtractor:
         'res': {'type': 'multiple', 'field': ('height', 'width'),
                 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},

-        # Actual field names
+        # For compatibility with youtube-dl
         'format_id': {'type': 'alias', 'field': 'id'},
         'preference': {'type': 'alias', 'field': 'ie_pref'},
         'language_preference': {'type': 'alias', 'field': 'lang'},
         'source_preference': {'type': 'alias', 'field': 'source'},
         'protocol': {'type': 'alias', 'field': 'proto'},
         'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
-        'audio_channels': {'type': 'alias', 'field': 'channels'},

         # Deprecated
         'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
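
Note: the alias table in the hunk above rewrites user-facing sort field names to the internal ones used for comparison ('filesize_approx' -> 'fs_approx'; on one side 'audio_channels' -> 'channels'). A sketch of how such a table is applied; the function name is a stand-in, not the yt-dlp internal:

ALIASES = {
    'format_id': 'id',
    'preference': 'ie_pref',
    'language_preference': 'lang',
    'source_preference': 'source',
    'protocol': 'proto',
    'filesize_approx': 'fs_approx',
}

def canonical(field):
    return ALIASES.get(field, field)

print(canonical('filesize_approx'))  # -> fs_approx
print(canonical('height'))           # -> height (not an alias; passed through)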
@@ -1997,9 +1963,14 @@ class InfoExtractor:
                 else 'https:')

     def _proto_relative_url(self, url, scheme=None):
-        scheme = scheme or self.http_scheme()
-        assert scheme.endswith(':')
-        return sanitize_url(url, scheme=scheme[:-1])
+        if url is None:
+            return url
+        if url.startswith('//'):
+            if scheme is None:
+                scheme = self.http_scheme()
+            return scheme + url
+        else:
+            return url

     def _sleep(self, timeout, video_id, msg_template=None):
         if msg_template is None:

@@ -3670,18 +3641,11 @@ class InfoExtractor:
             t['name'] = cls.ie_key()
             yield t

-    @classmethod
-    def get_webpage_testcases(cls):
-        tests = getattr(cls, '_WEBPAGE_TESTS', [])
-        for t in tests:
-            t['name'] = cls.ie_key()
-        return tests
-
     @classproperty
     def age_limit(cls):
         """Get age limit from the testcases"""
         return max(traverse_obj(
-            (*cls.get_testcases(include_onlymatching=False), *cls.get_webpage_testcases()),
+            tuple(cls.get_testcases(include_onlymatching=False)),
             (..., (('playlist', 0), None), 'info_dict', 'age_limit')) or [0])

     @classmethod

@@ -3801,12 +3765,10 @@ class InfoExtractor:
             headers['Ytdl-request-proxy'] = geo_verification_proxy
         return headers

-    @staticmethod
-    def _generic_id(url):
+    def _generic_id(self, url):
         return urllib.parse.unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])

-    @staticmethod
-    def _generic_title(url):
+    def _generic_title(self, url):
         return urllib.parse.unquote(os.path.splitext(url_basename(url))[0])

     @staticmethod

@@ -3852,52 +3814,6 @@ class InfoExtractor:
         self.to_screen(f'Downloading {playlist_label}{playlist_id} - add --no-playlist to download just the {video_label}{video_id}')
         return True

-    def _error_or_warning(self, err, _count=None, _retries=0, *, fatal=True):
-        RetryManager.report_retry(err, _count or int(fatal), _retries, info=self.to_screen, warn=self.report_warning,
-                                  sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))
-
-    def RetryManager(self, **kwargs):
-        return RetryManager(self.get_param('extractor_retries', 3), self._error_or_warning, **kwargs)
-
-    @classmethod
-    def extract_from_webpage(cls, ydl, url, webpage):
-        ie = (cls if isinstance(cls._extract_from_webpage, types.MethodType)
-              else ydl.get_info_extractor(cls.ie_key()))
-        for info in ie._extract_from_webpage(url, webpage) or []:
-            # url = None since we do not want to set (webpage/original)_url
-            ydl.add_default_extra_info(info, ie, None)
-            yield info
-
-    @classmethod
-    def _extract_from_webpage(cls, url, webpage):
-        for embed_url in orderedSet(
-                cls._extract_embed_urls(url, webpage) or [], lazy=True):
-            yield cls.url_result(embed_url, cls)
-
-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
-        """@returns all the embed urls on the webpage"""
-        if '_EMBED_URL_RE' not in cls.__dict__:
-            assert isinstance(cls._EMBED_REGEX, (list, tuple))
-            for idx, regex in enumerate(cls._EMBED_REGEX):
-                assert regex.count('(?P<url>') == 1, \
-                    f'{cls.__name__}._EMBED_REGEX[{idx}] must have exactly 1 url group\n\t{regex}'
-            cls._EMBED_URL_RE = tuple(map(re.compile, cls._EMBED_REGEX))
-
-        for regex in cls._EMBED_URL_RE:
-            for mobj in regex.finditer(webpage):
-                embed_url = urllib.parse.urljoin(url, unescapeHTML(mobj.group('url')))
-                if cls._VALID_URL is False or cls.suitable(embed_url):
-                    yield embed_url
-
-    class StopExtraction(Exception):
-        pass
-
-    @classmethod
-    def _extract_url(cls, webpage):  # TODO: Remove
-        """Only for compatibility with some older extractors"""
-        return next(iter(cls._extract_embed_urls(None, webpage) or []), None)
-

 class SearchInfoExtractor(InfoExtractor):
     """

@@ -3908,8 +3824,8 @@ class SearchInfoExtractor(InfoExtractor):

     _MAX_RESULTS = float('inf')

-    @classproperty
-    def _VALID_URL(cls):
+    @classmethod
+    def _make_valid_url(cls):
         return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

     def _real_extract(self, query):
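
Note: both versions of _proto_relative_url() above converge on the same observable behaviour for plain URLs - prefix protocol-relative '//host/path' inputs with a scheme, pass everything else (including None) through. A sketch of that contract:

def proto_relative_url(url, scheme='https:'):
    if url is None:
        return url
    if url.startswith('//'):
        return scheme + url
    return url

assert proto_relative_url('//example.com/v.mp4') == 'https://example.com/v.mp4'
assert proto_relative_url('http://example.com/v.mp4') == 'http://example.com/v.mp4'
assert proto_relative_url(None) is None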
@@ -4,7 +4,9 @@ from ..utils import ExtractorError

 class CommonMistakesIE(InfoExtractor):
     IE_DESC = False  # Do not list
-    _VALID_URL = r'(?:url|URL|yt-dlp)$'
+    _VALID_URL = r'''(?x)
+        (?:url|URL)$
+    '''

     _TESTS = [{
         'url': 'url',
@@ -58,10 +58,7 @@ class CondeNastIE(InfoExtractor):
     )''' % '|'.join(_SITES.keys())
     IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

-    _EMBED_REGEX = [r'''(?x)
-        <(?:iframe|script)[^>]+?src=(["\'])(?P<url>
-            (?:https?:)?//player(?:-backend)?\.(?:%s)\.com/(?:embed(?:js)?|(?:script|inline)/video)/.+?
-        )\1''' % '|'.join(_SITES.keys())]
+    EMBED_URL = r'(?:https?:)?//player(?:-backend)?\.(?:%s)\.com/(?:embed(?:js)?|(?:script|inline)/video)/.+?' % '|'.join(_SITES.keys())

     _TESTS = [{
         'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
@@ -7,8 +7,6 @@ from ..utils import (

 class CrooksAndLiarsIE(InfoExtractor):
     _VALID_URL = r'https?://embed\.crooksandliars\.com/(?:embed|v)/(?P<id>[A-Za-z0-9]+)'
-    _EMBED_REGEX = [r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1']
-
     _TESTS = [{
         'url': 'https://embed.crooksandliars.com/embed/8RUoRhRi',
         'info_dict': {
@@ -28,7 +28,6 @@ from ..utils import (
     join_nonempty,
     lowercase_escape,
     merge_dicts,
-    parse_iso8601,
     qualities,
     remove_end,
     sanitized_Request,

@@ -650,7 +649,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
     IE_NAME = 'crunchyroll:playlist'
-    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:\w{2}(?:-\w{2})?/)?(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:\w{1,2}/)?(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'

     _TESTS = [{
         'url': 'https://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',

@@ -758,33 +757,50 @@ class CrunchyrollBetaBaseIE(CrunchyrollBaseIE):

 class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
     IE_NAME = 'crunchyroll:beta'
-    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{2}(?:-\w{2})?/)?)watch/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)watch/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
     _TESTS = [{
         'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/to-the-future',
         'info_dict': {
-            'id': 'GY2P1Q98Y',
+            'id': '696363',
             'ext': 'mp4',
-            'duration': 1380.241,
-            'timestamp': 1459632600,
+            'timestamp': 1459610100,
             'description': 'md5:a022fbec4fbb023d43631032c91ed64b',
+            'uploader': 'Toei Animation',
             'title': 'World Trigger Episode 73 – To the Future',
             'upload_date': '20160402',
-            'series': 'World Trigger',
-            'series_id': 'GR757DMKY',
-            'season': 'World Trigger',
-            'season_id': 'GR9P39NJ6',
-            'season_number': 1,
-            'episode': 'To the Future',
             'episode_number': 73,
-            'thumbnail': r're:^https://beta.crunchyroll.com/imgsrv/.*\.jpeg$',
+            'series': 'World Trigger',
+            'average_rating': 4.9,
+            'episode': 'To the Future',
+            'season': 'World Trigger',
+            'thumbnail': 'https://img1.ak.crunchyroll.com/i/spire3-tmb/c870dedca1a83137c2d3d144984155ed1459527119_main.jpg',
+            'season_number': 1,
         },
         'params': {'skip_download': 'm3u8'},
+        'expected_warnings': ['Unable to download XML']
+    }, {
+        'url': 'https://beta.crunchyroll.com/watch/GYK53DMPR/wicked-lord-shingan-reborn',
+        'info_dict': {
+            'id': '648781',
+            'ext': 'mp4',
+            'episode_number': 1,
+            'timestamp': 1389173400,
+            'series': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+            'description': 'md5:5579d1a0355cc618558ba23d27067a62',
+            'uploader': 'TBS',
+            'episode': 'Wicked Lord Shingan... Reborn',
+            'average_rating': 4.9,
+            'season': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+            'thumbnail': 'https://img1.ak.crunchyroll.com/i/spire3-tmb/2ba0384e225a5370d5f0ee9496d91ea51389046521_main.jpg',
+            'title': 'Love, Chunibyo & Other Delusions - Heart Throb - Episode 1 – Wicked Lord Shingan... Reborn',
+            'season_number': 2,
+            'upload_date': '20140108',
+        },
+        'params': {'skip_download': 'm3u8'},
+        'expected_warnings': ['Unable to download XML']
     }, {
         'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/',
         'only_matching': True,
-    }, {
-        'url': 'https://beta.crunchyroll.com/pt-br/watch/G8WUN8VKP/the-ruler-of-conspiracy',
-        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -797,38 +813,56 @@ class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):

         episode_response = self._download_json(
             f'{api_domain}/cms/v2{bucket}/episodes/{internal_id}', display_id,
-            note='Retrieving episode metadata', query=params)
+            note='Retrieving episode metadata',
+            query=params)
         if episode_response.get('is_premium_only') and not episode_response.get('playback'):
             raise ExtractorError('This video is for premium members only.', expected=True)

         stream_response = self._download_json(
-            f'{api_domain}{episode_response["__links__"]["streams"]["href"]}', display_id,
-            note='Retrieving stream info', query=params)
-        get_streams = lambda name: (traverse_obj(stream_response, name) or {}).items()
+            episode_response['playback'], display_id,
+            note='Retrieving stream info')
+        thumbnails = []
+        for thumbnails_data in traverse_obj(episode_response, ('images', 'thumbnail')):
+            for thumbnail_data in thumbnails_data:
+                thumbnails.append({
+                    'url': thumbnail_data.get('source'),
+                    'width': thumbnail_data.get('width'),
+                    'height': thumbnail_data.get('height'),
+                })
+        subtitles = {}
+        for lang, subtitle_data in stream_response.get('subtitles').items():
+            subtitles[lang] = [{
+                'url': subtitle_data.get('url'),
+                'ext': subtitle_data.get('format')
+            }]

         requested_hardsubs = [('' if val == 'none' else val) for val in (self._configuration_arg('hardsub') or ['none'])]
         hardsub_preference = qualities(requested_hardsubs[::-1])
         requested_formats = self._configuration_arg('format') or ['adaptive_hls']

         formats = []
-        for stream_type, streams in get_streams('streams'):
+        for stream_type, streams in stream_response.get('streams', {}).items():
             if stream_type not in requested_formats:
                 continue
             for stream in streams.values():
                 hardsub_lang = stream.get('hardsub_locale') or ''
                 if hardsub_lang.lower() not in requested_hardsubs:
                     continue
-                format_id = join_nonempty(stream_type, format_field(stream, 'hardsub_locale', 'hardsub-%s'))
+                format_id = join_nonempty(
+                    stream_type,
+                    format_field(stream, 'hardsub_locale', 'hardsub-%s'))
                 if not stream.get('url'):
                     continue
-                if stream_type.endswith('hls'):
+                if stream_type.split('_')[-1] == 'hls':
                     adaptive_formats = self._extract_m3u8_formats(
                         stream['url'], display_id, 'mp4', m3u8_id=format_id,
-                        fatal=False, note=f'Downloading {format_id} HLS manifest')
-                elif stream_type.endswith('dash'):
+                        note='Downloading %s information' % format_id,
+                        fatal=False)
+                elif stream_type.split('_')[-1] == 'dash':
                     adaptive_formats = self._extract_mpd_formats(
                         stream['url'], display_id, mpd_id=format_id,
-                        fatal=False, note=f'Downloading {format_id} MPD manifest')
+                        note='Downloading %s information' % format_id,
+                        fatal=False)
                 for f in adaptive_formats:
                     if f.get('acodec') != 'none':
                         f['language'] = stream_response.get('audio_locale')

@@ -838,11 +872,10 @@ class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):

         return {
             'id': internal_id,
-            'title': '%s Episode %s – %s' % (
-                episode_response.get('season_title'), episode_response.get('episode'), episode_response.get('title')),
-            'description': try_get(episode_response, lambda x: x['description'].replace(r'\r\n', '\n')),
+            'title': '%s Episode %s – %s' % (episode_response.get('season_title'), episode_response.get('episode'), episode_response.get('title')),
+            'description': episode_response.get('description').replace(r'\r\n', '\n'),
             'duration': float_or_none(episode_response.get('duration_ms'), 1000),
-            'timestamp': parse_iso8601(episode_response.get('upload_date')),
+            'thumbnails': thumbnails,
             'series': episode_response.get('series_title'),
             'series_id': episode_response.get('series_id'),
             'season': episode_response.get('season_title'),

@@ -850,31 +883,28 @@ class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
             'season_number': episode_response.get('season_number'),
             'episode': episode_response.get('title'),
             'episode_number': episode_response.get('sequence_number'),
-            'formats': formats,
-            'thumbnails': [{
-                'url': thumb.get('source'),
-                'width': thumb.get('width'),
-                'height': thumb.get('height'),
-            } for thumb in traverse_obj(episode_response, ('images', 'thumbnail', ..., ...)) or []],
-            'subtitles': {
-                lang: [{
-                    'url': subtitle_data.get('url'),
-                    'ext': subtitle_data.get('format')
-                }] for lang, subtitle_data in get_streams('subtitles')
-            },
+            'subtitles': subtitles,
+            'formats': formats
         }


 class CrunchyrollBetaShowIE(CrunchyrollBetaBaseIE):
     IE_NAME = 'crunchyroll:playlist:beta'
-    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{2}(?:-\w{2})?/)?)series/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)series/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
     _TESTS = [{
         'url': 'https://beta.crunchyroll.com/series/GY19NQ2QR/Girl-Friend-BETA',
         'info_dict': {
-            'id': 'GY19NQ2QR',
+            'id': 'girl-friend-beta',
             'title': 'Girl Friend BETA',
         },
         'playlist_mincount': 10,
+    }, {
+        'url': 'https://beta.crunchyroll.com/series/GYJQV73V6/love-chunibyo--other-delusions---heart-throb--',
+        'info_dict': {
+            'id': 'love-chunibyo-other-delusions-heart-throb-',
+            'title': 'Love, Chunibyo & Other Delusions - Heart Throb -',
+        },
+        'playlist_mincount': 10,
     }, {
         'url': 'https://beta.crunchyroll.com/it/series/GY19NQ2QR/Girl-Friend-BETA',
         'only_matching': True,
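
Note: in the Crunchyroll _real_extract() hunk, hardsub selection reverses the requested list before feeding it to qualities(), because qualities() ranks later entries higher - so the first-requested hardsub locale ends up preferred. A sketch with qualities() modelled on the yt-dlp helper of the same name:

def qualities(quality_ids):
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

requested_hardsubs = ['' if v == 'none' else v for v in ['none', 'en-US']]
hardsub_preference = qualities(requested_hardsubs[::-1])

print(hardsub_preference(''))       # -> 1: unsubbed stream ranks highest
print(hardsub_preference('en-US'))  # -> 0
print(hardsub_preference('de-DE'))  # -> -1: not requested at all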
@@ -163,7 +163,7 @@ class CSpanIE(InfoExtractor):
             video_id = m.group('id')
             video_type = 'program' if m.group('type') == 'prog' else 'clip'
         else:
-            senate_isvp_url = SenateISVPIE._extract_url(webpage)
+            senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
             if senate_isvp_url:
                 title = self._og_search_title(webpage)
                 surl = smuggle_url(senate_isvp_url, {'force_title': title})
@@ -1,3 +1,5 @@
+import re
+
 from .common import InfoExtractor
 from ..compat import compat_str
 from ..utils import (

@@ -10,7 +12,6 @@ from ..utils import (

 class DailyMailIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dailymail\.co\.uk/(?:video/[^/]+/video-|embed/video/)(?P<id>[0-9]+)'
-    _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?dailymail\.co\.uk/embed/video/\d+\.html)']
     _TESTS = [{
         'url': 'http://www.dailymail.co.uk/video/tvshowbiz/video-1295863/The-Mountain-appears-sparkling-water-ad-Heavy-Bubbles.html',
         'md5': 'f6129624562251f628296c3a9ffde124',

@@ -25,6 +26,12 @@ class DailyMailIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return re.findall(
+            r'<iframe\b[^>]+\bsrc=["\'](?P<url>(?:https?:)?//(?:www\.)?dailymail\.co\.uk/embed/video/\d+\.html)',
+            webpage)
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
@@ -99,7 +99,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                     [/=](?P<id>[^/?_&]+)(?:.+?\bplaylist=(?P<playlist_id>x[0-9a-z]+))?
                     '''
     IE_NAME = 'dailymotion'
-    _EMBED_REGEX = [r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1']
     _TESTS = [{
         'url': 'http://www.dailymotion.com/video/x5kesuj_office-christmas-party-review-jason-bateman-olivia-munn-t-j-miller_news',
         'md5': '074b95bdee76b9e3654137aee9c79dfe',

@@ -209,13 +208,18 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
       }
    xid'''

-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
+    @staticmethod
+    def _extract_urls(webpage):
+        urls = []
+        # Look for embedded Dailymotion player
         # https://developer.dailymotion.com/player#player-parameters
-        yield from super()._extract_embed_urls(url, webpage)
+        for mobj in re.finditer(
+                r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage):
+            urls.append(unescapeHTML(mobj.group('url')))
         for mobj in re.finditer(
                 r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', webpage):
-            yield from 'https://www.dailymotion.com/embed/video/' + mobj.group('id')
+            urls.append('https://www.dailymotion.com/embed/video/' + mobj.group('id'))
+        return urls

     def _real_extract(self, url):
         url, smuggled_data = unsmuggle_url(url)

@@ -374,15 +378,6 @@ class DailymotionPlaylistIE(DailymotionPlaylistBaseIE):
     }]
     _OBJECT_TYPE = 'collection'

-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
-        # Look for embedded Dailymotion playlist player (#3822)
-        for mobj in re.finditer(
-                r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1',
-                webpage):
-            for p in re.findall(r'list\[\]=/playlist/([^/]+)/', unescapeHTML(mobj.group('url'))):
-                yield '//dailymotion.com/playlist/%s' % p
-

 class DailymotionUserIE(DailymotionPlaylistBaseIE):
     IE_NAME = 'dailymotion:user'
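
Note: the second pattern in the Dailymotion embed scan above targets inline DM.player(...) calls rather than iframe/embed tags, recovering the video id from the player options object. A runnable sketch of that pattern against a made-up page:

import re

page = '''<script>
DM.player(document.getElementById("player"), {video: "x5kesuj", width: "100%"});
</script>'''

for mobj in re.finditer(
        r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', page):
    print('https://www.dailymotion.com/embed/video/' + mobj.group('id'))
# -> https://www.dailymotion.com/embed/video/x5kesuj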
@@ -1,9 +1,10 @@
+import re
+
 from .common import InfoExtractor


 class DBTVIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?dagbladet\.no/video/(?:(?:embed|(?P<display_id>[^/]+))/)?(?P<id>[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8})'
-    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dagbladet\.no/video/embed/(?:[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8}).*?)\1']
     _TESTS = [{
         'url': 'https://www.dagbladet.no/video/PynxJnNWChE/',
         'md5': 'b8f850ba1860adbda668d367f9b77699',

@@ -27,6 +28,12 @@ class DBTVIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [url for _, url in re.findall(
+            r'<iframe[^>]+src=(["\'])((?:https?:)?//(?:www\.)?dagbladet\.no/video/embed/(?:[0-9A-Za-z_-]{11}|[a-zA-Z0-9]{8}).*?)\1',
+            webpage)]
+
     def _real_extract(self, url):
         display_id, video_id = self._match_valid_url(url).groups()
         info = {
@@ -1,3 +1,5 @@
+import re
+
 from .common import InfoExtractor
 from ..utils import int_or_none

@@ -23,7 +25,6 @@ class DigitekaIE(InfoExtractor):
         )
         /id
     )/(?P<id>[\d+a-z]+)'''
-    _EMBED_REGEX = [r'<(?:iframe|script)[^>]+src=["\'](?P<url>(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)']
     _TESTS = [{
         # news
         'url': 'https://www.ultimedia.com/default/index/videogeneric/id/s8uk0r',

@@ -57,6 +58,14 @@ class DigitekaIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_url(webpage):
+        mobj = re.search(
+            r'<(?:iframe|script)[^>]+src=["\'](?P<url>(?:https?:)?//(?:www\.)?ultimedia\.com/deliver/(?:generic|musique)(?:/[^/]+)*/(?:src|article)/[\d+a-z]+)',
+            webpage)
+        if mobj:
+            return mobj.group('url')
+
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id')
@@ -6,7 +6,7 @@ from .common import InfoExtractor


 class DoodStreamIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?dood\.(?:to|watch|so|pm)/[ed]/(?P<id>[a-z0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?dood\.(?:to|watch)/[ed]/(?P<id>[a-z0-9]+)'
     _TESTS = [{
         'url': 'http://dood.to/e/5s1wmbdacezb',
         'md5': '4568b83b31e13242b3f1ff96c55f0595',

@@ -37,9 +37,6 @@ class DoodStreamIE(InfoExtractor):
             'description': 'Stacy Cruz Cute ALLWAYSWELL | DoodStream.com',
             'thumbnail': 'https://img.doodcdn.com/snaps/8edqd5nppkac3x8u.jpg',
         }
-    }, {
-        'url': 'https://dood.so/d/jzrxn12t2s7n',
-        'only_matching': True
     }]

     def _real_extract(self, url):

@@ -47,8 +44,7 @@ class DoodStreamIE(InfoExtractor):
         url = f'https://dood.to/e/{video_id}'
         webpage = self._download_webpage(url, video_id)

-        title = self._html_search_meta(
-            ('og:title', 'twitter:title'), webpage, default=None) or self._html_extract_title(webpage)
+        title = self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None)
         thumb = self._html_search_meta(['og:image', 'twitter:image'], webpage, default=None)
         token = self._html_search_regex(r'[?&]token=([a-z0-9]+)[&\']', webpage, 'token')
         description = self._html_search_meta(
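
Note: the DoodStream title hunk adds a fallback chain - prefer og:title/twitter:title metadata, fall back to the <title> tag when neither is present. A regex-based sketch of that chain; the two functions are simplified stand-ins for the _html_search_meta/_html_extract_title helpers:

import re

def search_meta(names, webpage):
    for name in names:
        mobj = re.search(
            r'<meta[^>]+(?:property|name)=["\']%s["\'][^>]+content=["\']([^"\']+)' % re.escape(name),
            webpage)
        if mobj:
            return mobj.group(1)

def extract_title(webpage):
    mobj = re.search(r'<title[^>]*>([^<]+)</title>', webpage)
    return mobj.group(1) if mobj else None

page = '<title>Fallback title</title>'
print(search_meta(['og:title', 'twitter:title'], page) or extract_title(page))  # -> Fallback title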
@@ -718,33 +718,6 @@ class TLCIE(DiscoveryPlusBaseIE):
     }


-class MotorTrendIE(DiscoveryPlusBaseIE):
-    _VALID_URL = r'https?://(?:watch\.)?motortrend\.com/video' + DPlayBaseIE._PATH_REGEX
-    _TESTS = [{
-        'url': 'https://watch.motortrend.com/video/car-issues-motortrend-atve-us/double-dakotas',
-        'info_dict': {
-            'id': '"4859182"',
-            'display_id': 'double-dakotas',
-            'ext': 'mp4',
-            'title': 'Double Dakotas',
-            'description': 'Tyler’s buy-one-get-one Dakota deal has the Wizard pulling double duty.',
-            'season_number': 2,
-            'episode_number': 3,
-        },
-        'skip': 'Available for Premium users',
-    }, {
-        'url': 'https://watch.motortrend.com/video/car-issues-motortrend-atve-us/double-dakotas',
-        'only_matching': True,
-    }]
-
-    _PRODUCT = 'vel'
-    _DISCO_API_PARAMS = {
-        'disco_host': 'us1-prod-direct.watch.motortrend.com',
-        'realm': 'go',
-        'country': 'us',
-    }
-
-
 class DiscoveryPlusIE(DiscoveryPlusBaseIE):
     _VALID_URL = r'https?://(?:www\.)?discoveryplus\.com/(?!it/)(?:\w{2}/)?video' + DPlayBaseIE._PATH_REGEX
     _TESTS = [{
@ -11,7 +11,6 @@ from ..utils import (
|
|||||||
|
|
||||||
class DrTuberIE(InfoExtractor):
|
class DrTuberIE(InfoExtractor):
|
||||||
_VALID_URL = r'https?://(?:(?:www|m)\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
|
_VALID_URL = r'https?://(?:(?:www|m)\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
|
||||||
_EMBED_REGEX = [r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)']
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
|
'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
|
||||||
'md5': '93e680cf2536ad0dfb7e74d94a89facd',
|
'md5': '93e680cf2536ad0dfb7e74d94a89facd',
|
||||||
@ -34,6 +33,12 @@ class DrTuberIE(InfoExtractor):
|
|||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _extract_urls(webpage):
|
||||||
|
return re.findall(
|
||||||
|
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)',
|
||||||
|
webpage)
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
mobj = self._match_valid_url(url)
|
mobj = self._match_valid_url(url)
|
||||||
video_id = mobj.group('id')
|
video_id = mobj.group('id')
|
||||||
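The right-hand side restores a hand-written `_extract_urls` helper; the left-hand side had expressed the same iframe pattern declaratively as `_EMBED_REGEX`. Outside the extractor framework, the restored method boils down to a one-pattern scan (standalone sketch using the regex from the hunk):

import re

DRTUBER_EMBED_RE = r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)'

def extract_embed_urls(webpage):
    # With a single capturing group in the pattern, re.findall returns that group directly.
    return re.findall(DRTUBER_EMBED_RE, webpage)

page = '<iframe src="https://www.drtuber.com/embed/1740434"></iframe>'
print(extract_embed_urls(page))  # ['https://www.drtuber.com/embed/1740434']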
yt_dlp/extractor/eagleplatform.py
@@ -1,4 +1,3 @@
-import functools
 import re

 from .common import InfoExtractor
@@ -6,7 +5,6 @@ from ..compat import compat_HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
-    smuggle_url,
     unsmuggle_url,
     url_or_none,
 )
@@ -20,7 +18,6 @@ class EaglePlatformIE(InfoExtractor):
         )
         (?P<id>\d+)
     '''
-    _EMBED_REGEX = [r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//.+?\.media\.eagleplatform\.com/index/player\?.+?)\1']
     _TESTS = [{
         # http://lenta.ru/news/2015/03/06/navalny/
         'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
@@ -55,14 +52,14 @@ class EaglePlatformIE(InfoExtractor):
         'only_matching': True,
     }]

-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
-        add_referer = functools.partial(smuggle_url, data={'referrer': url})
-
-        res = tuple(super()._extract_embed_urls(url, webpage))
-        if res:
-            return map(add_referer, res)
-
+    @staticmethod
+    def _extract_url(webpage):
+        # Regular iframe embedding
+        mobj = re.search(
+            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//.+?\.media\.eagleplatform\.com/index/player\?.+?)\1',
+            webpage)
+        if mobj is not None:
+            return mobj.group('url')
         PLAYER_JS_RE = r'''
                 <script[^>]+
                     src=(?P<qjs>["\'])(?:https?:)?//(?P<host>(?:(?!(?P=qjs)).)+\.media\.eagleplatform\.com)/player/player\.js(?P=qjs)
@@ -77,7 +74,7 @@ class EaglePlatformIE(InfoExtractor):
                     data-id=["\'](?P<id>\d+)
             ''' % PLAYER_JS_RE, webpage)
         if mobj is not None:
-            return [add_referer('eagleplatform:%(host)s:%(id)s' % mobj.groupdict())]
+            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()
         # Generalization of "Javascript code usage", "Combined usage" and
         # "Usage without attaching to DOM" embeddings (see
         # http://dultonmedia.github.io/eplayer/)
@@ -98,7 +95,7 @@ class EaglePlatformIE(InfoExtractor):
             </script>
         ''' % PLAYER_JS_RE, webpage)
         if mobj is not None:
-            return [add_referer('eagleplatform:%(host)s:%(id)s' % mobj.groupdict())]
+            return 'eagleplatform:%(host)s:%(id)s' % mobj.groupdict()

     @staticmethod
     def _handle_error(response):
@@ -204,14 +201,3 @@ class EaglePlatformIE(InfoExtractor):
             'age_limit': age_limit,
             'formats': formats,
         }
-
-
-class ClipYouEmbedIE(InfoExtractor):
-    _VALID_URL = False
-
-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
-        mobj = re.search(
-            r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
-        if mobj is not None:
-            yield smuggle_url('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), {'referrer': url})
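On the left, `_extract_embed_urls` tags every discovered embed with the page that referenced it: `functools.partial(smuggle_url, data={'referrer': url})` is mapped over the results, and `unsmuggle_url` recovers the data later. A sketch of that round trip under the assumption that the payload rides URL-encoded in the fragment (the `__smuggle` marker name is made up for this sketch; yt-dlp uses its own):

import functools
import json
import urllib.parse

def smuggle_url(url, data):
    # Assumed scheme: tuck the extra data into the fragment, URL-encoded as JSON.
    return url + '#__smuggle=' + urllib.parse.quote(json.dumps(data))

def unsmuggle_url(smug_url, default=None):
    # Inverse operation: split the fragment off and decode the payload.
    if '#__smuggle=' not in smug_url:
        return smug_url, default
    url, _, payload = smug_url.partition('#__smuggle=')
    return url, json.loads(urllib.parse.unquote(payload))

# The diff's pattern: bind the referrer once, then map it over every embed URL found.
add_referer = functools.partial(smuggle_url, data={'referrer': 'https://example.com/article'})
embeds = ['https://media.eagleplatform.com/index/player?record_id=1']
for smugged in map(add_referer, embeds):
    print(unsmuggle_url(smugged))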
yt_dlp/extractor/embedly.py
@@ -1,5 +1,3 @@
-import re
-import urllib.parse
 from .common import InfoExtractor
 from ..compat import compat_urllib_parse_unquote

@@ -11,14 +9,5 @@ class EmbedlyIE(InfoExtractor):
         'only_matching': True,
     }]
-
-    @classmethod
-    def _extract_embed_urls(cls, url, webpage):
-        # Bypass suitable check
-        for mobj in re.finditer(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage):
-            yield mobj.group('url')
-
-        for mobj in re.finditer(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage):
-            yield urllib.parse.unquote(mobj.group('url'))

     def _real_extract(self, url):
         return self.url_result(compat_urllib_parse_unquote(self._match_id(url)))
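The removed Embedly helper shows two discovery modes: `embedly-card` elements carry the target directly in `href`, while `embedly-embed` iframes carry it percent-encoded inside the src's `url=` parameter, hence the `urllib.parse.unquote`. A standalone sketch (the regexes here are loosened to `[^>]*` between attributes; the originals used a single `[^>]`):

import re
import urllib.parse

CARD_RE = r'class=["\']embedly-card["\'][^>]*href=["\'](?P<url>[^"\']+)'
EMBED_RE = r'class=["\']embedly-embed["\'][^>]*src=["\'][^"\']*url=(?P<url>[^&"\']+)'

def extract_urls(webpage):
    for mobj in re.finditer(CARD_RE, webpage):
        # Cards link the target directly.
        yield mobj.group('url')
    for mobj in re.finditer(EMBED_RE, webpage):
        # Embeds carry the target percent-encoded in the src's url= parameter.
        yield urllib.parse.unquote(mobj.group('url'))

page = ('<a class="embedly-card" href="https://example.com/v/1">x</a>'
        '<iframe class="embedly-embed" src="//cdn.embedly.com/widgets/media.html'
        '?url=https%3A%2F%2Fexample.com%2Fv%2F2&h=1"></iframe>')
print(list(extract_urls(page)))  # ['https://example.com/v/1', 'https://example.com/v/2']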
yt_dlp/extractor/ert.py
@@ -15,6 +15,7 @@ from ..utils import (
     parse_iso8601,
     str_or_none,
     try_get,
+    unescapeHTML,
     url_or_none,
     variadic,
 )
@@ -274,7 +275,6 @@ class ERTWebtvEmbedIE(InfoExtractor):
     IE_DESC = 'ert.gr webtv embedded videos'
     _BASE_PLAYER_URL_RE = re.escape('//www.ert.gr/webtv/live-uni/vod/dt-uni-vod.php')
     _VALID_URL = rf'https?:{_BASE_PLAYER_URL_RE}\?([^#]+&)?f=(?P<id>[^#&]+)'
-    _EMBED_REGEX = [rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>(?:https?:)?{_BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+)(?P=_q1)']

     _TESTS = [{
         'url': 'https://www.ert.gr/webtv/live-uni/vod/dt-uni-vod.php?f=trailers/E2251_TO_DIKTYO_E09_16-01_1900.mp4&bgimg=/photos/2022/1/to_diktio_ep09_i_istoria_tou_diadiktiou_stin_Ellada_1021x576.jpg',
@@ -287,6 +287,17 @@ class ERTWebtvEmbedIE(InfoExtractor):
         },
     }]

+    @classmethod
+    def _extract_urls(cls, webpage):
+        EMBED_URL_RE = rf'(?:https?:)?{cls._BASE_PLAYER_URL_RE}\?(?:(?!(?P=_q1)).)+'
+        EMBED_RE = rf'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>{EMBED_URL_RE})(?P=_q1)'
+
+        for mobj in re.finditer(EMBED_RE, webpage):
+            url = unescapeHTML(mobj.group('url'))
+            if not cls.suitable(url):
+                continue
+            yield url
+
     def _real_extract(self, url):
         video_id = self._match_id(url)
         formats, subs = self._extract_m3u8_formats_and_subtitles(
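The added `_extract_urls` differs from the plain regex scans above in two ways: candidate URLs are HTML-unescaped (iframe src attributes routinely contain `&amp;`) and then filtered through `cls.suitable()` so only URLs the extractor can actually handle are yielded. A sketch of the same filter with a placeholder domain:

import html
import re

# Placeholder player URL; the real pattern is built from cls._BASE_PLAYER_URL_RE.
VALID_URL = re.compile(r'https?://(?:www\.)?example\.com/vod\.php\?(?:[^#]+&)?f=[^#&]+')
EMBED_RE = re.compile(r'<iframe[^>]+?src=(?P<q>["\'])(?P<url>(?:https?:)?//[^"\']+)(?P=q)')

def extract_urls(webpage):
    for mobj in EMBED_RE.finditer(webpage):
        # Unescape entities first, then keep only URLs this extractor can handle.
        url = html.unescape(mobj.group('url'))
        if not VALID_URL.match(url):  # the role cls.suitable() plays in the diff
            continue
        yield url

page = '<iframe src="https://www.example.com/vod.php?bgimg=x&amp;f=clip.mp4"></iframe>'
print(list(extract_urls(page)))  # ['https://www.example.com/vod.php?bgimg=x&f=clip.mp4']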
yt_dlp/extractor/espn.py
@@ -10,7 +10,6 @@ from ..utils import (
     determine_ext,
     dict_get,
     int_or_none,
-    traverse_obj,
     unified_strdate,
     unified_timestamp,
 )
@@ -284,24 +283,22 @@ class ESPNCricInfoIE(InfoExtractor):
 class WatchESPNIE(AdobePassIE):
     _VALID_URL = r'https?://(?:www\.)?espn\.com/(?:watch|espnplus)/player/_/id/(?P<id>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})'
     _TESTS = [{
-        'url': 'https://www.espn.com/watch/player/_/id/dbbc6b1d-c084-4b47-9878-5f13c56ce309',
+        'url': 'https://www.espn.com/watch/player/_/id/ba7d17da-453b-4697-bf92-76a99f61642b',
         'info_dict': {
-            'id': 'dbbc6b1d-c084-4b47-9878-5f13c56ce309',
+            'id': 'ba7d17da-453b-4697-bf92-76a99f61642b',
             'ext': 'mp4',
-            'title': 'Huddersfield vs. Burnley',
-            'duration': 7500,
-            'thumbnail': 'https://artwork.api.espn.com/artwork/collections/media/dbbc6b1d-c084-4b47-9878-5f13c56ce309/default?width=640&apikey=1ngjw23osgcis1i1vbj96lmfqs',
+            'title': 'Serbia vs. Turkey',
+            'thumbnail': 'https://artwork.api.espn.com/artwork/collections/media/ba7d17da-453b-4697-bf92-76a99f61642b/default?width=640&apikey=1ngjw23osgcis1i1vbj96lmfqs',
         },
         'params': {
             'skip_download': True,
         },
     }, {
-        'url': 'https://www.espn.com/watch/player/_/id/a049a56e-a7ce-477e-aef3-c7e48ef8221c',
+        'url': 'https://www.espn.com/watch/player/_/id/4e9b5bd1-4ceb-4482-9d28-1dd5f30d2f34',
         'info_dict': {
-            'id': 'a049a56e-a7ce-477e-aef3-c7e48ef8221c',
+            'id': '4e9b5bd1-4ceb-4482-9d28-1dd5f30d2f34',
             'ext': 'mp4',
-            'title': 'Dynamo Dresden vs. VfB Stuttgart (Round #1) (German Cup)',
-            'duration': 8335,
+            'title': 'Real Madrid vs. Real Betis (LaLiga)',
             'thumbnail': 'https://s.secure.espncdn.com/stitcher/artwork/collections/media/bd1f3d12-0654-47d9-852e-71b85ea695c7/16x9.jpg?timestamp=202201112217&showBadge=true&cb=12&package=ESPN_PLUS',
         },
         'params': {
@@ -313,7 +310,6 @@ class WatchESPNIE(AdobePassIE):
             'id': '317f5fd1-c78a-4ebe-824a-129e0d348421',
             'ext': 'mp4',
             'title': 'The Wheel - Episode 10',
-            'duration': 3352,
             'thumbnail': 'https://s.secure.espncdn.com/stitcher/artwork/collections/media/317f5fd1-c78a-4ebe-824a-129e0d348421/16x9.jpg?timestamp=202205031523&showBadge=true&cb=12&package=ESPN_PLUS',
         },
         'params': {
@@ -332,10 +328,9 @@ class WatchESPNIE(AdobePassIE):

     def _real_extract(self, url):
         video_id = self._match_id(url)
-        cdn_data = self._download_json(
+        video_data = self._download_json(
             f'https://watch-cdn.product.api.espn.com/api/product/v3/watchespn/web/playback/event?id={video_id}',
-            video_id)
-        video_data = cdn_data['playbackState']
+            video_id)['playbackState']

         # ESPN+ subscription required, through cookies
         if 'DTC' in video_data.get('sourceId'):
@@ -404,7 +399,6 @@ class WatchESPNIE(AdobePassIE):

         return {
             'id': video_id,
-            'duration': traverse_obj(cdn_data, ('tracking', 'duration')),
             'title': video_data.get('name'),
             'formats': formats,
             'subtitles': subtitles,
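The dropped duration line used `traverse_obj(cdn_data, ('tracking', 'duration'))`: the left-hand side kept the whole JSON response under one name so fields outside `playbackState` stayed reachable, with `traverse_obj` absorbing any missing key. A minimal stand-in for that safe nested lookup (not yt-dlp's far more general `traverse_obj`):

def traverse(obj, path, default=None):
    # Walk the keys in order; bail out with the default on the first miss.
    for key in path:
        if not isinstance(obj, dict) or key not in obj:
            return default
        obj = obj[key]
    return obj

cdn_data = {'playbackState': {'name': 'Serbia vs. Turkey'}, 'tracking': {'duration': 7500}}
print(traverse(cdn_data, ('tracking', 'duration')))  # 7500
print(traverse(cdn_data, ('tracking', 'missing')))   # None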
yt_dlp/extractor/expressen.py
@@ -1,3 +1,5 @@
+import re
+
 from .common import InfoExtractor
 from ..utils import (
     determine_ext,
@@ -15,7 +17,6 @@ class ExpressenIE(InfoExtractor):
         tv/(?:[^/]+/)*
         (?P<id>[^/?#&]+)
     '''
-    _EMBED_REGEX = [r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:expressen|di)\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1']
     _TESTS = [{
         'url': 'https://www.expressen.se/tv/ledare/ledarsnack/ledarsnack-om-arbetslosheten-bland-kvinnor-i-speciellt-utsatta-omraden/',
         'md5': 'deb2ca62e7b1dcd19fa18ba37523f66e',
@@ -44,6 +45,13 @@ class ExpressenIE(InfoExtractor):
         'only_matching': True,
     }]

+    @staticmethod
+    def _extract_urls(webpage):
+        return [
+            mobj.group('url') for mobj in re.finditer(
+                r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?(?:expressen|di)\.se/(?:tvspelare/video|videoplayer/embed)/tv/.+?)\1',
+                webpage)]
+
     def _real_extract(self, url):
         display_id = self._match_id(url)

yt_dlp/extractor/facebook.py
@@ -57,13 +57,6 @@ class FacebookIE(InfoExtractor):
         )
         (?P<id>[0-9]+)
     '''
-    _EMBED_REGEX = [
-        r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
-        # Facebook API embed https://developers.facebook.com/docs/plugins/embedded-video-player
-        r'''(?x)<div[^>]+
-            class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
-            data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''',
-    ]
     _LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
     _CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
     _NETRC_MACHINE = 'facebook'
@@ -318,6 +311,21 @@ class FacebookIE(InfoExtractor):
         'graphURI': '/api/graphql/'
     }

+    @staticmethod
+    def _extract_urls(webpage):
+        urls = []
+        for mobj in re.finditer(
+                r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
+                webpage):
+            urls.append(mobj.group('url'))
+        # Facebook API embed
+        # see https://developers.facebook.com/docs/plugins/embedded-video-player
+        for mobj in re.finditer(r'''(?x)<div[^>]+
+                class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
+                data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''', webpage):
+            urls.append(mobj.group('url'))
+        return urls
+
     def _perform_login(self, username, password):
         login_page_req = sanitized_Request(self._LOGIN_URL)
         self._set_cookie('facebook.com', 'locale', 'en_US')
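Both sides of the Facebook hunk encode the same idea: several independent patterns, each exposing a named `url` group, with the results concatenated. Declared as a list, the loop-and-append body becomes generic (standalone sketch using the two patterns from the diff):

import re

EMBED_PATTERNS = [
    r'<iframe[^>]+?src=(["\'])(?P<url>https?://www\.facebook\.com/(?:video/embed|plugins/video\.php).+?)\1',
    r'''(?x)<div[^>]+
        class=(?P<q1>[\'"])[^\'"]*\bfb-(?:video|post)\b[^\'"]*(?P=q1)[^>]+
        data-href=(?P<q2>[\'"])(?P<url>(?:https?:)?//(?:www\.)?facebook.com/.+?)(?P=q2)''',
]

def extract_urls(webpage):
    # Run every pattern over the page and collect the named 'url' group.
    return [mobj.group('url')
            for pattern in EMBED_PATTERNS
            for mobj in re.finditer(pattern, webpage)]

page = '<iframe src="https://www.facebook.com/plugins/video.php?href=v"></iframe>'
print(extract_urls(page))  # ['https://www.facebook.com/plugins/video.php?href=v']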
Some files were not shown because too many files have changed in this diff.