Compare commits


No commits in common. "0b5583b112d418ba4d4eefcde1cd4d54ab95458a" and "24093d52a768e624a3ecd9d834f3239f64e1bf2c" have entirely different histories.

24 changed files with 89 additions and 626 deletions
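The listing that follows is the raw file-by-file diff between these two commits. For reference, the same comparison can be reproduced from a local clone of yt-dlp that already contains both commits (a minimal sketch; the commit hashes are taken from the message above, everything else is standard git):

    # summary: changed files with addition/deletion counts
    git diff --stat 0b5583b112d418ba4d4eefcde1cd4d54ab95458a 24093d52a768e624a3ecd9d834f3239f64e1bf2c
    # full unified diff, file by file
    git diff 0b5583b112d418ba4d4eefcde1cd4d54ab95458a 24093d52a768e624a3ecd9d834f3239f64e1bf2c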

View File

@ -11,7 +11,7 @@ body:
options:
- label: I'm reporting a broken site
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -55,7 +55,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -63,8 +63,8 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell
validations:

View File

@ -11,7 +11,7 @@ body:
options:
- label: I'm reporting a new site support request
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -67,7 +67,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -75,8 +75,8 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell
validations:

View File

@ -11,7 +11,7 @@ body:
options:
- label: I'm requesting a site-specific feature
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -63,7 +63,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -71,8 +71,8 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell
validations:

View File

@ -11,7 +11,7 @@ body:
options:
- label: I'm reporting a bug unrelated to a specific site
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -48,7 +48,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -56,8 +56,8 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell
validations:

View File

@ -13,7 +13,7 @@ body:
required: true
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
required: true
@ -44,7 +44,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -52,7 +52,7 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell

View File

@ -19,7 +19,7 @@ body:
required: true
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
required: true
- label: I've verified that I'm running yt-dlp version **2022.07.18** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- label: I've verified that I'm running yt-dlp version **2022.06.29** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
required: true
@ -50,7 +50,7 @@ body:
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2022.07.18 [9d339c4] (win32_exe)
[debug] yt-dlp version 2022.06.29 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
@ -58,7 +58,7 @@ body:
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2022.07.18, Current version: 2022.07.18
yt-dlp is up to date (2022.07.18)
Latest version: 2022.06.29, Current version: 2022.06.29
yt-dlp is up to date (2022.06.29)
<more lines>
render: shell

View File

@ -456,7 +456,6 @@ jobs:
- name: Make Update spec
run: |
echo "# This file is used for regulating self-update" >> _update_spec
echo "lock 2022.07.18 .+ Python 3.6" >> _update_spec
- name: Upload update spec
uses: actions/upload-release-asset@v1
env:

2 .gitignore vendored
View File

@ -27,13 +27,11 @@ cookies
*.ass
*.avi
*.desktop
*.f4v
*.flac
*.flv
*.jpeg
*.jpg
*.m4a
*.mpga
*.m4v
*.mhtml
*.mkv

View File

@ -272,16 +272,3 @@ crazymoose77756
nomevi
Brett824
pingiun
dosy4ev
EhtishamSabir
Ferdi265
FirefoxMetzger
ftk
lamby
llamasblade
lockmatrix
misaelaguayo
odo2063
pritam20ps05
scy
sheerluck

View File

@ -11,73 +11,6 @@
-->
### 2022.07.18
* Allow users to specify encoding in each config files by [Lesmiscore](https://github.com/Lesmiscore)
* Discard infodict from memory if no longer needed
* Do not allow extractors to return `None`
* Do not load system certificates when `certifi` is used
* Fix rounding of integers in format table
* Improve chapter sanitization
* Skip some fixup if remux/recode is needed by [Lesmiscore](https://github.com/Lesmiscore)
* Support `--no-progress` for `--wait-for-video`
* Fix bug in [612f2be](https://github.com/yt-dlp/yt-dlp/commit/612f2be5d3924540158dfbe5f25d841f04cff8c6)
* [outtmpl] Add alternate form `h` for HTML escaping
* [aes] Add multiple padding modes in CBC by [elyse0](https://github.com/elyse0)
* [extractor/common] Passthrough `errnote=False` to parsers
* [extractor/generic] Remove HEAD request
* [http] Ensure the file handle is always closed
* [ModifyChapters] Modify duration in infodict
* [options] Fix aliases to `--config-location`
* [utils] Fix `get_domain`
* [build] Consistent order for lazy extractors by [lamby](https://github.com/lamby)
* [build] Fix architecture suffix of executables by [odo2063](https://github.com/odo2063)
* [build] Improve `setup.py`
* [update] Do not check `_update_spec` when up to date
* [update] Prepare to remove Python 3.6 support
* [compat] Let PyInstaller detect _legacy module
* [devscripts/update-formulae] Do not change dependency section
* [test] Split download tests so they can be more easily run in CI
* [docs] Improve docstring of `download_ranges` by [FirefoxMetzger](https://github.com/FirefoxMetzger)
* [docs] Improve issue templates
* [build] Fix bug in [6d916fe](https://github.com/yt-dlp/yt-dlp/commit/6d916fe709a38e8c4c69b73843acf170b5165931)
* [cleanup, utils] Refactor parse_codecs
* [cleanup] Misc fixes and cleanup
* [extractor/acfun] Add extractors by [lockmatrix](https://github.com/lockmatrix)
* [extractor/Audiodraft] Add extractors by [Ashish0804](https://github.com/Ashish0804), [fstirlitz](https://github.com/fstirlitz)
* [extractor/cellebrite] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/detik] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/hytale] Add extractor by [llamasblade](https://github.com/llamasblade), [pukkandan](https://github.com/pukkandan)
* [extractor/liputan6] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/mocha] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/rtl.lu] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/rtvsl] Add extractor by [iw0nderhow](https://github.com/iw0nderhow), [pukkandan](https://github.com/pukkandan)
* [extractor/StarTrek] Add extractor by [scy](https://github.com/scy)
* [extractor/syvdk] Add extractor by [misaelaguayo](https://github.com/misaelaguayo)
* [extractor/theholetv] Add extractor by [dosy4ev](https://github.com/dosy4ev)
* [extractor/TubeTuGraz] Add extractor by [Ferdi265](https://github.com/Ferdi265), [pukkandan](https://github.com/pukkandan)
* [extractor/tviplayer] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/wetv] Add extractors by [elyse0](https://github.com/elyse0)
* [extractor/wikimedia] Add extractor by [EhtishamSabir](https://github.com/EhtishamSabir), [pukkandan](https://github.com/pukkandan)
* [extractor/youtube] Fix duration check for post-live manifestless mode
* [extractor/youtube] More metadata for storyboards by [ftk](https://github.com/ftk)
* [extractor/bigo] Fix extractor by [Lesmiscore](https://github.com/Lesmiscore)
* [extractor/BiliIntl] Fix subtitle extraction by [MinePlayersPE](https://github.com/MinePlayersPE)
* [extractor/crunchyroll] Improve `_VALID_URL`
* [extractor/fifa] Fix extractor by [ischmidt20](https://github.com/ischmidt20)
* [extractor/instagram] Fix post/story extractors by [pritam20ps05](https://github.com/pritam20ps05), [pukkandan](https://github.com/pukkandan)
* [extractor/iq] Set language correctly for Korean subtitles
* [extractor/MangoTV] Fix subtitle languages
* [extractor/Netverse] Improve playlist extractor by [HobbyistDev](https://github.com/HobbyistDev)
* [extractor/philharmoniedeparis] Fix extractor by [sqrtNOT](https://github.com/sqrtNOT)
* [extractor/Trovo] Fix extractor by [u-spec-png](https://github.com/u-spec-png)
* [extractor/twitch] Support storyboards for VODs by [ftk](https://github.com/ftk)
* [extractor/WatchESPN] Improve `_VALID_URL` by [IONECarter](https://github.com/IONECarter), [dirkf](https://github.com/dirkf)
* [extractor/WSJArticle] Fix video id extraction by [sqrtNOT](https://github.com/sqrtNOT)
* [extractor/Ximalaya] Fix extractors by [lockmatrix](https://github.com/lockmatrix)
* [cleanup, extractor/youtube] Fix tests by [sheerluck](https://github.com/sheerluck)
### 2022.06.29
* Fix `--downloader native`

View File

@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
clean-test:
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.jpeg *.jpg *.m4a *.mpga *.m4v *.mhtml *.mkv *.mov \
*.mp3 *.mp4 *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
*.3gp *.ape *.ass *.avi *.desktop *.flac *.flv *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
*.mp4 *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
clean-dist:
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap

View File

@ -4,7 +4,6 @@
- **17live**
- **17live:clip**
- **1tv**: Первый канал
- **20.detik.com**
- **20min**
- **23video**
- **247sports**
@ -32,8 +31,6 @@
- **AcademicEarth:Course**
- **acast**
- **acast:channel**
- **AcFunBangumi**
- **AcFunVideo**
- **ADN**: [<abbr title="netrc machine"><em>animedigitalnetwork</em></abbr>] Anime Digital Network
- **AdobeConnect**
- **adobetv**
@ -97,8 +94,6 @@
- **ATVAt**
- **AudiMedia**
- **AudioBoom**
- **Audiodraft:custom**
- **Audiodraft:generic**
- **audiomack**
- **audiomack:album**
- **Audius**: Audius.co
@ -210,7 +205,6 @@
- **CCMA**
- **CCTV**: 央视网
- **CDA**
- **Cellebrite**
- **CeskaTelevize**
- **CGTN**
- **channel9**: Channel 9
@ -509,7 +503,6 @@
- **HungamaSong**
- **huya:live**: huya.com
- **Hypem**
- **Hytale**
- **Icareus**
- **ign.com**
- **IGNArticle**
@ -622,7 +615,6 @@
- **linkedin:learning**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
- **linkedin:learning:course**: [<abbr title="netrc machine"><em>linkedin</em></abbr>]
- **LinuxAcademy**: [<abbr title="netrc machine"><em>linuxacademy</em></abbr>]
- **Liputan6**
- **LiTV**
- **LiveJournal**
- **livestream**
@ -706,7 +698,6 @@
- **MLSSoccer**
- **Mnet**
- **MNetTV**: [<abbr title="netrc machine"><em>mnettv</em></abbr>]
- **MochaVideo**
- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
- **Mofosex**
- **MofosexEmbed**
@ -1077,14 +1068,10 @@
- **RTDocumentryPlaylist**
- **rte**: Raidió Teilifís Éireann TV
- **rte:radio**: Raidió Teilifís Éireann radio
- **rtl.lu:article**
- **rtl.lu:tele-vod**
- **rtl.nl**: rtl.nl and rtlxl.nl
- **rtl2**
- **rtl2:you**
- **rtl2:you:series**
- **RTLLuLive**
- **RTLLuRadio**
- **RTNews**
- **RTP**
- **RTRFM**
@ -1096,7 +1083,6 @@
- **rtve.es:television**
- **RTVNH**
- **RTVS**
- **rtvslo.si**
- **RUHD**
- **Rule34Video**
- **RumbleChannel**
@ -1205,7 +1191,6 @@
- **SRGSSR**
- **SRGSSRPlay**: srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites
- **stanfordoc**: Stanford Open ClassRoom
- **StarTrek**
- **startv**
- **Steam**
- **SteamCommunityBroadcast**
@ -1233,7 +1218,6 @@
- **SVTSeries**
- **SWRMediathek**
- **Syfy**
- **SYVDK**
- **SztvHu**
- **t-online.de**
- **Tagesschau**
@ -1272,7 +1256,6 @@
- **TenPlay**: [<abbr title="netrc machine"><em>10play</em></abbr>]
- **TF1**
- **TFO**
- **TheHoleTv**
- **TheIntercept**
- **ThePlatform**
- **ThePlatformFeed**
@ -1315,8 +1298,6 @@
- **TruNews**
- **TruTV**
- **Tube8**
- **TubeTuGraz**: [<abbr title="netrc machine"><em>tubetugraz</em></abbr>] tube.tugraz.at
- **TubeTuGrazSeries**: [<abbr title="netrc machine"><em>tubetugraz</em></abbr>]
- **TubiTv**: [<abbr title="netrc machine"><em>tubitv</em></abbr>]
- **TubiTvShow**
- **Tumblr**: [<abbr title="netrc machine"><em>tumblr</em></abbr>]
@ -1345,7 +1326,6 @@
- **TVCArticle**
- **TVer**
- **tvigle**: Интернет-телевидение Tvigle.ru
- **TVIPlayer**
- **tvland.com**
- **TVN24**
- **TVNet**
@ -1518,10 +1498,7 @@
- **Weibo**
- **WeiboMobile**
- **WeiqiTV**: WQTV
- **wetv:episode**
- **WeTvSeries**
- **whowatch**
- **wikimedia.org**
- **Willow**
- **WimTV**
- **Wistia**

View File

@ -306,7 +306,7 @@ class YoutubeDL:
client_certificate_password: Password for client certificate private key, if encrypted.
If not provided and the key is encrypted, yt-dlp will ask interactively
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
(Only supported by some extractors)
At the moment, this is only supported by YouTube.
http_headers: A dictionary of custom headers to be used for all requests
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
@ -589,7 +589,7 @@ class YoutubeDL:
if current_version < MIN_RECOMMENDED:
msg = ('Support for Python version %d.%d has been deprecated. '
'See https://github.com/yt-dlp/yt-dlp/issues/3764 for more details.'
'\n You will no longer receive updates on this version')
'\n You will no longer recieve updates on this version')
if current_version < MIN_SUPPORTED:
msg = 'Python version %d.%d is no longer supported'
self.deprecation_warning(
@ -1693,7 +1693,7 @@ class YoutubeDL:
assert ie_result['_type'] in ('playlist', 'multi_video')
title = ie_result.get('title') or ie_result.get('id') or '<Untitled>'
self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
self.to_screen(f'[download] Downloading playlist: {title}')
all_entries = PlaylistEntries(self, ie_result)
entries = orderedSet(all_entries.get_requested_items(), lazy=True)

View File

@ -24,10 +24,6 @@ else:
return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce))))
def aes_cbc_encrypt_bytes(data, key, iv, **kwargs):
return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs))
def unpad_pkcs7(data):
return data[:-compat_ord(data[-1])]
@ -168,7 +164,7 @@ def aes_cbc_decrypt(data, key, iv):
return decrypted_data
def aes_cbc_encrypt(data, key, iv, *, padding_mode='pkcs7'):
def aes_cbc_encrypt(data, key, iv, padding_mode='pkcs7'):
"""
Encrypt with aes in CBC mode
@ -534,21 +530,13 @@ def ghash(subkey, data):
__all__ = [
'aes_ctr_decrypt',
'aes_cbc_decrypt',
'aes_cbc_decrypt_bytes',
'aes_ctr_decrypt',
'aes_decrypt_text',
'aes_decrypt',
'aes_ecb_decrypt',
'aes_encrypt',
'aes_gcm_decrypt_and_verify',
'aes_gcm_decrypt_and_verify_bytes',
'aes_cbc_encrypt',
'aes_cbc_encrypt_bytes',
'aes_ctr_encrypt',
'aes_ecb_encrypt',
'aes_encrypt',
'key_expansion',
'pad_block',
'unpad_pkcs7',

View File

@ -1474,7 +1474,6 @@ from .rtve import (
)
from .rtvnh import RTVNHIE
from .rtvs import RTVSIE
from .rtvslo import RTVSLOIE
from .ruhd import RUHDIE
from .rule34video import Rule34VideoIE
from .rumble import (
@ -2088,8 +2087,6 @@ from .weibo import (
WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .wetv import WeTvEpisodeIE, WeTvSeriesIE
from .wikimedia import WikimediaIE
from .willow import WillowIE
from .wimtv import WimTVIE
from .whowatch import WhoWatchIE

View File

@ -795,14 +795,12 @@ class BiliIntlBaseIE(InfoExtractor):
def _get_subtitles(self, *, ep_id=None, aid=None):
sub_json = self._call_api(
'/web/v2/subtitle', ep_id or aid, fatal=False,
note='Downloading subtitles list', errnote='Unable to download subtitles list',
query=filter_dict({
'/web/v2/subtitle', ep_id or aid, note='Downloading subtitles list',
errnote='Unable to download subtitles list', query=filter_dict({
'platform': 'web',
's_locale': 'en_US',
'episode_id': ep_id,
'aid': aid,
})) or {}
}))
subtitles = {}
for sub in sub_json.get('subtitles') or []:
sub_url = sub.get('url')

View File

@ -931,9 +931,9 @@ class InfoExtractor:
def __print_error(self, errnote, fatal, video_id, err):
if fatal:
raise ExtractorError(f'{video_id}: {errnote}', cause=err)
raise ExtractorError(f'{video_id}: {errnote} ', cause=err)
elif errnote:
self.report_warning(f'{video_id}: {errnote}: {err}')
self.report_warning(f'{video_id}: {errnote} {err}')
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True, errnote=None):
if transform_source:

View File

@ -67,7 +67,7 @@ class MGTVIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
tk2 = base64.urlsafe_b64encode(
f'did={str(uuid.uuid4())}|pno=1030|ver=0.3.0301|clit={int(time.time())}'.encode())[::-1]
f'did={compat_str(uuid.uuid4()).encode()}|pno=1030|ver=0.3.0301|clit={int(time.time())}'.encode())[::-1]
try:
api_data = self._download_json(
'https://pcweb.api.mgtv.com/player/video', video_id, query={
@ -137,15 +137,14 @@ class MGTVIE(InfoExtractor):
url_sub = sub.get('url')
if not url_sub:
continue
locale = sub.get('captionSimpleName') or 'en'
locale = sub.get('captionCountrySimpleName')
sub = self._download_json(f'{domain}{url_sub}', video_id, fatal=False,
note=f'Download subtitle for locale {sub.get("name")} ({locale})') or {}
sub_url = url_or_none(sub.get('info'))
if not sub_url:
continue
subtitles.setdefault(locale.lower(), []).append({
subtitles.setdefault(locale or 'en', []).append({
'url': sub_url,
'name': sub.get('name'),
'ext': 'srt'
})
return subtitles

View File

@ -1,6 +1,12 @@
import functools
from .common import InfoExtractor
from .dailymotion import DailymotionIE
from ..utils import smuggle_url, traverse_obj
from ..utils import (
InAdvancePagedList,
smuggle_url,
traverse_obj,
)
class NetverseBaseIE(InfoExtractor):
@ -8,13 +14,16 @@ class NetverseBaseIE(InfoExtractor):
'watch': 'watchvideo',
'video': 'watchvideo',
'webseries': 'webseries',
'season': 'webseason_videos',
}
def _call_api(self, slug, endpoint, query={}, season_id='', display_id=None):
return self._download_json(
f'https://api.netverse.id/medias/api/v2/{self._ENDPOINTS[endpoint]}/{slug}/{season_id}',
display_id or slug, query=query)
def _call_api(self, url, query={}):
display_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
json_data = self._download_json(
f'https://api.netverse.id/medias/api/v2/{self._ENDPOINTS[sites_type]}/{display_id}',
display_id, query=query)
return display_id, json_data
class NetverseIE(NetverseBaseIE):
@ -27,9 +36,10 @@ class NetverseIE(NetverseBaseIE):
'title': 'Waktu Indonesia Bercanda - Edisi Spesial Lebaran 2016',
'ext': 'mp4',
'season': 'Season 2016',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/T7aV31Y0eGRWBbwkK/x1080',
'description': 'md5:fc27747c0aa85067b6967c816f01617c',
'thumbnail': 'https://vplayed-uat.s3-ap-southeast-1.amazonaws.com/images/webseries/thumbnails/2021/11/619cfce45c827.jpeg',
'episode_number': 22,
'series': 'Waktu Indonesia Bercanda',
'episode': 'Episode 22',
'uploader_id': 'x2ir3vq',
'age_limit': 0,
@ -50,9 +60,10 @@ class NetverseIE(NetverseBaseIE):
'title': 'Jadoo Seorang Model',
'ext': 'mp4',
'season': 'Season 2',
'description': 'md5:8a74f70812cca267e19ee0635f0af835',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/Thwuy1YURicFmGu0v/x1080',
'description': 'md5:c616e8e59d3edf2d3d506e3736120d99',
'thumbnail': 'https://storage.googleapis.com/netprime-live/images/webseries/thumbnails/2021/11/619cf63f105d3.jpeg',
'episode_number': 2,
'series': 'Hello Jadoo',
'episode': 'Episode 2',
'view_count': int,
'like_count': int,
@ -74,9 +85,10 @@ class NetverseIE(NetverseBaseIE):
'ext': 'mp4',
'title': 'Tetangga Baru',
'season': 'Season 1',
'description': 'md5:23fcf70e97d461d3029d25d59b2ccfb9',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/T3Ogm1YEnnyjVKAFF/x1080',
'description': 'md5:ed6dd355bed84d139b1154c3d8d65957',
'thumbnail': 'https://vplayed-uat.s3-ap-southeast-1.amazonaws.com/images/webseries/thumbnails/2021/11/619cfd9d32c5f.jpeg',
'episode_number': 1,
'series': 'Tetangga Masa Gitu',
'episode': 'Episode 1',
'timestamp': 1624538169,
'view_count': int,
@ -96,11 +108,12 @@ class NetverseIE(NetverseBaseIE):
'info_dict': {
'id': 'x887jzz',
'ext': 'mp4',
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/TfuZ_1Y6PboJ5An_s/x1080',
'thumbnail': 'https://storage.googleapis.com/netprime-live/images/webseries/thumbnails/2021/11/619cf63f105d3.jpeg',
'season': 'Season 1',
'episode_number': 1,
'description': 'md5:d4f627b3e7a3f9acdc55f6cdd5ea41d5',
'description': 'md5:c616e8e59d3edf2d3d506e3736120d99',
'title': 'Namaku Choi Jadoo',
'series': 'Hello Jadoo',
'episode': 'Episode 1',
'age_limit': 0,
'like_count': int,
@ -117,8 +130,7 @@ class NetverseIE(NetverseBaseIE):
}]
def _real_extract(self, url):
display_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
program_json = self._call_api(display_id, sites_type)
display_id, program_json = self._call_api(url)
videos = program_json['response']['videos']
return {
@ -131,46 +143,34 @@ class NetverseIE(NetverseBaseIE):
'thumbnail': traverse_obj(videos, ('program_detail', 'thumbnail_image')),
'description': traverse_obj(videos, ('program_detail', 'description')),
'episode_number': videos.get('episode_order'),
'series': traverse_obj(videos, ('program_detail', 'title')),
}
class NetversePlaylistIE(NetverseBaseIE):
_VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>webseries)/(?P<display_id>[^/?#&]+)'
_TESTS = [{
# multiple season
_TEST = {
'url': 'https://netverse.id/webseries/tetangga-masa-gitu',
'info_dict': {
'id': 'tetangga-masa-gitu',
'title': 'Tetangga Masa Gitu',
},
'playlist_count': 519,
}, {
# single season
'url': 'https://netverse.id/webseries/kelas-internasional',
'info_dict': {
'id': 'kelas-internasional',
'title': 'Kelas Internasional',
},
'playlist_count': 203,
}]
'playlist_count': 46,
}
def parse_playlist(self, json_data, playlist_id):
slug_sample = traverse_obj(json_data, ('related', 'data', ..., 'slug'))[0]
for season in traverse_obj(json_data, ('seasons', ..., 'id')):
playlist_json = self._call_api(
slug_sample, 'season', display_id=playlist_id, season_id=season)
for current_page in range(playlist_json['response']['season_list']['last_page']):
playlist_json = self._call_api(slug_sample, 'season', query={'page': current_page + 1},
season_id=season, display_id=playlist_id)
for slug in traverse_obj(playlist_json, ('response', ..., 'data', ..., 'slug')):
yield self.url_result(f'https://www.netverse.id/video/{slug}', NetverseIE)
def parse_playlist(self, url, page_num):
_, playlist_json = self._call_api(url, query={'page': page_num + 1})
for slug in traverse_obj(playlist_json, ('response', 'related', 'data', ..., 'slug')):
yield self.url_result(f'https://www.netverse.id/video/{slug}', NetverseIE)
def _real_extract(self, url):
playlist_id, sites_type = self._match_valid_url(url).group('display_id', 'type')
playlist_data = self._call_api(playlist_id, sites_type)
_, playlist_data = self._call_api(url)
webseries_related_info = playlist_data['response']['related']
# TODO: get video from other season
# The season has id and the next season video is located at api_url/<season_id>?page=<page>
return self.playlist_result(
self.parse_playlist(playlist_data['response'], playlist_id),
InAdvancePagedList(functools.partial(self.parse_playlist, url),
webseries_related_info['last_page'],
webseries_related_info['to'] - webseries_related_info['from'] + 1),
traverse_obj(playlist_data, ('response', 'webseries_info', 'slug')),
traverse_obj(playlist_data, ('response', 'webseries_info', 'title')))

View File

@ -104,8 +104,9 @@ class PhantomJSwrapper:
self.exe = check_executable('phantomjs', ['-v'])
if not self.exe:
raise ExtractorError(
'PhantomJS not found, Please download it from https://phantomjs.org/download.html', expected=True)
raise ExtractorError('PhantomJS executable not found in PATH, '
'download it from http://phantomjs.org',
expected=True)
self.extractor = extractor

View File

@ -1,151 +0,0 @@
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_duration,
traverse_obj,
unified_timestamp,
url_or_none,
)
class RTVSLOIE(InfoExtractor):
IE_NAME = 'rtvslo.si'
_VALID_URL = r'''(?x)
https?://(?:
(?:365|4d)\.rtvslo.si/arhiv/[^/?#&;]+|
(?:www\.)?rtvslo\.si/rtv365/arhiv
)/(?P<id>\d+)'''
_GEO_COUNTRIES = ['SI']
_API_BASE = 'https://api.rtvslo.si/ava/{}/{}?client_id=82013fb3a531d5414f478747c1aca622'
SUB_LANGS_MAP = {'Slovenski': 'sl'}
_TESTS = [
{
'url': 'https://www.rtvslo.si/rtv365/arhiv/174842550?s=tv',
'info_dict': {
'id': '174842550',
'ext': 'flv',
'release_timestamp': 1643140032,
'upload_date': '20220125',
'series': 'Dnevnik',
'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/92/dnevnik_3_wide2.jpg',
'description': 'md5:76a18692757aeb8f0f51221106277dd2',
'timestamp': 1643137046,
'title': 'Dnevnik',
'series_id': '92',
'release_date': '20220125',
'duration': 1789,
},
}, {
'url': 'https://365.rtvslo.si/arhiv/utrip/174843754',
'info_dict': {
'id': '174843754',
'ext': 'mp4',
'series_id': '94',
'release_date': '20220129',
'timestamp': 1643484455,
'title': 'Utrip',
'duration': 813,
'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/94/utrip_1_wide2.jpg',
'description': 'md5:77f2892630c7b17bb7a5bb84319020c9',
'release_timestamp': 1643485825,
'upload_date': '20220129',
'series': 'Utrip',
},
}, {
'url': 'https://365.rtvslo.si/arhiv/il-giornale-della-sera/174844609',
'info_dict': {
'id': '174844609',
'ext': 'mp3',
'series_id': '106615841',
'title': 'Il giornale della sera',
'duration': 1328,
'series': 'Il giornale della sera',
'timestamp': 1643743800,
'release_timestamp': 1643745424,
'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/il-giornale-della-sera_wide2.jpg',
'upload_date': '20220201',
'tbr': 128000,
'release_date': '20220201',
},
}, {
'url': 'https://4d.rtvslo.si/arhiv/dnevnik/174842550',
'only_matching': True
}
]
def _real_extract(self, url):
v_id = self._match_id(url)
meta = self._download_json(self._API_BASE.format('getRecordingDrm', v_id), v_id)['response']
thumbs = [{'id': k, 'url': v, 'http_headers': {'Accept': 'image/jpeg'}}
for k, v in (meta.get('images') or {}).items()]
subs = {}
for s in traverse_obj(meta, 'subs', 'subtitles', default=[]):
lang = self.SUB_LANGS_MAP.get(s.get('language'), s.get('language') or 'und')
subs.setdefault(lang, []).append({
'url': s.get('file'),
'ext': traverse_obj(s, 'format', expected_type=str.lower),
})
jwt = meta.get('jwt')
if not jwt:
raise ExtractorError('Site did not provide an authentication token, cannot proceed.')
media = self._download_json(self._API_BASE.format('getMedia', v_id), v_id, query={'jwt': jwt})['response']
formats = []
adaptive_url = traverse_obj(media, ('addaptiveMedia', 'hls_sec'), expected_type=url_or_none)
if adaptive_url:
formats = self._extract_wowza_formats(adaptive_url, v_id, skip_protocols=['smil'])
adaptive_url = traverse_obj(media, ('addaptiveMedia_sl', 'hls_sec'), expected_type=url_or_none)
if adaptive_url:
for f in self._extract_wowza_formats(adaptive_url, v_id, skip_protocols=['smil']):
formats.append({
**f,
'format_id': 'sign-' + f['format_id'],
'format_note': 'Sign language interpretation', 'preference': -10,
'language': (
'slv' if f.get('language') == 'eng' and f.get('acodec') != 'none'
else f.get('language'))
})
formats.extend(
{
'url': f['streams'][strm],
'ext': traverse_obj(f, 'mediaType', expected_type=str.lower),
'width': f.get('width'),
'height': f.get('height'),
'tbr': f.get('bitrate'),
'filesize': f.get('filesize'),
}
for strm in ('http', 'https')
for f in media.get('mediaFiles') or []
if traverse_obj(f, ('streams', strm))
)
if any('intermission.mp4' in x['url'] for x in formats):
self.raise_geo_restricted(countries=self._GEO_COUNTRIES, metadata_available=True)
if any('dummy_720p.mp4' in x.get('manifest_url', '') for x in formats) and meta.get('stub') == 'error':
raise ExtractorError(f'{self.IE_NAME} said: Clip not available', expected=True)
self._sort_formats(formats)
return {
'id': v_id,
'webpage_url': ''.join(traverse_obj(meta, ('canonical', ('domain', 'path')))),
'title': meta.get('title'),
'formats': formats,
'subtitles': subs,
'thumbnails': thumbs,
'description': meta.get('description'),
'timestamp': unified_timestamp(traverse_obj(meta, 'broadcastDate', ('broadcastDates', 0))),
'release_timestamp': unified_timestamp(meta.get('recordingDate')),
'duration': meta.get('duration') or parse_duration(meta.get('length')),
'tags': meta.get('genre'),
'series': meta.get('showName'),
'series_id': meta.get('showId'),
}

View File

@ -1,208 +0,0 @@
import functools
import re
import time
from .common import InfoExtractor
from ..aes import aes_cbc_encrypt_bytes
from ..utils import determine_ext, int_or_none, traverse_obj, urljoin
class WeTvBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?wetv\.vip/(?:[^?#]+/)?play'
def _get_ckey(self, video_id, url, app_version, platform):
ua = self.get_param('http_headers')['User-Agent']
payload = (f'{video_id}|{int(time.time())}|mg3c3b04ba|{app_version}|0000000000000000|'
f'{platform}|{url[:48]}|{ua.lower()[:48]}||Mozilla|Netscape|Win32|00|')
return aes_cbc_encrypt_bytes(
bytes(f'|{sum(map(ord, payload))}|{payload}', 'utf-8'),
b'Ok\xda\xa3\x9e/\x8c\xb0\x7f^r-\x9e\xde\xf3\x14',
b'\x01PJ\xf3V\xe6\x19\xcf.B\xbb\xa6\x8c?p\xf9',
padding_mode='whitespace').hex()
def _get_video_api_response(self, video_url, video_id, series_id, subtitle_format, video_format, video_quality):
app_version = '3.5.57'
platform = '4830201'
ckey = self._get_ckey(video_id, video_url, app_version, platform)
query = {
'vid': video_id,
'cid': series_id,
'cKey': ckey,
'encryptVer': '8.1',
'spcaptiontype': '1' if subtitle_format == 'vtt' else '0', # 0 - SRT, 1 - VTT
'sphls': '1' if video_format == 'hls' else '0', # 0 - MP4, 1 - HLS
'defn': video_quality, # '': 480p, 'shd': 720p, 'fhd': 1080p
'spsrt': '1', # Enable subtitles
'sphttps': '1', # Enable HTTPS
'otype': 'json', # Response format: xml, json,
'dtype': '1',
'spwm': '1',
'host': 'wetv.vip', # These three values are needed for SHD
'referer': 'wetv.vip',
'ehost': video_url,
'appVer': app_version,
'platform': platform,
}
return self._search_json(r'QZOutputJson=', self._download_webpage(
'https://play.wetv.vip/getvinfo', video_id, query=query), 'api_response', video_id)
def _get_webpage_metadata(self, webpage, video_id):
return self._parse_json(
traverse_obj(self._search_nextjs_data(webpage, video_id), ('props', 'pageProps', 'data')),
video_id, fatal=False)
class WeTvEpisodeIE(WeTvBaseIE):
IE_NAME = 'wetv:episode'
_VALID_URL = WeTvBaseIE._VALID_URL_BASE + r'/(?P<series_id>\w+)(?:-[^?#]+)?/(?P<id>\w+)(?:-[^?#]+)?'
_TESTS = [{
'url': 'https://wetv.vip/en/play/air11ooo2rdsdi3-Cute-Programmer/v0040pr89t9-EP1-Cute-Programmer',
'md5': 'a046f565c9dce9b263a0465a422cd7bf',
'info_dict': {
'id': 'v0040pr89t9',
'ext': 'mp4',
'title': 'EP1: Cute Programmer',
'description': 'md5:e87beab3bf9f392d6b9e541a63286343',
'thumbnail': r're:^https?://[^?#]+air11ooo2rdsdi3',
'series': 'Cute Programmer',
'episode': 'Episode 1',
'episode_number': 1,
'duration': 2835,
},
}, {
'url': 'https://wetv.vip/en/play/u37kgfnfzs73kiu/p0039b9nvik',
'md5': '4d9d69bcfd11da61f4aae64fc6b316b3',
'info_dict': {
'id': 'p0039b9nvik',
'ext': 'mp4',
'title': 'EP1: You Are My Glory',
'description': 'md5:831363a4c3b4d7615e1f3854be3a123b',
'thumbnail': r're:^https?://[^?#]+u37kgfnfzs73kiu',
'series': 'You Are My Glory',
'episode': 'Episode 1',
'episode_number': 1,
'duration': 2454,
},
}, {
'url': 'https://wetv.vip/en/play/lcxgwod5hapghvw-WeTV-PICK-A-BOO/i0042y00lxp-Zhao-Lusi-Describes-The-First-Experiences-She-Had-In-Who-Rules-The-World-%7C-WeTV-PICK-A-BOO',
'md5': '71133f5c2d5d6cad3427e1b010488280',
'info_dict': {
'id': 'i0042y00lxp',
'ext': 'mp4',
'title': 'md5:f7a0857dbe5fbbe2e7ad630b92b54e6a',
'description': 'md5:76260cb9cdc0ef76826d7ca9d92fadfa',
'thumbnail': r're:^https?://[^?#]+lcxgwod5hapghvw',
'series': 'WeTV PICK-A-BOO',
'episode': 'Episode 0',
'episode_number': 0,
'duration': 442,
},
}]
def _extract_video_formats_and_subtitles(self, api_response, video_id, video_quality):
video_response = api_response['vl']['vi'][0]
video_width = video_response.get('vw')
video_height = video_response.get('vh')
formats, subtitles = [], {}
for video_format in video_response['ul']['ui']:
if video_format.get('hls'):
fmts, subs = self._extract_m3u8_formats_and_subtitles(
video_format['url'] + video_format['hls']['pname'], video_id, 'mp4', fatal=False)
for f in fmts:
f['width'] = video_width
f['height'] = video_height
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
else:
formats.append({
'url': f'{video_format["url"]}{video_response["fn"]}?vkey={video_response["fvkey"]}',
'width': video_width,
'height': video_height,
'ext': 'mp4',
})
return formats, subtitles
def _extract_video_subtitles(self, api_response, subtitles_format):
subtitles = {}
for subtitle in traverse_obj(api_response, ('sfl', 'fi')):
subtitles.setdefault(subtitle['lang'].lower(), []).append({
'url': subtitle['url'],
'ext': subtitles_format,
'protocol': 'm3u8_native' if determine_ext(subtitle['url']) == 'm3u8' else 'http',
})
return subtitles
def _real_extract(self, url):
video_id, series_id = self._match_valid_url(url).group('id', 'series_id')
webpage = self._download_webpage(url, video_id)
formats, subtitles = [], {}
for video_format, subtitle_format, video_quality in (('mp4', 'srt', ''), ('hls', 'vtt', 'shd'), ('hls', 'vtt', 'fhd')):
api_response = self._get_video_api_response(url, video_id, series_id, subtitle_format, video_format, video_quality)
fmts, subs = self._extract_video_formats_and_subtitles(api_response, video_id, video_quality)
native_subtitles = self._extract_video_subtitles(api_response, subtitle_format)
formats.extend(fmts)
self._merge_subtitles(subs, native_subtitles, target=subtitles)
self._sort_formats(formats)
webpage_metadata = self._get_webpage_metadata(webpage, video_id)
return {
'id': video_id,
'title': (self._og_search_title(webpage)
or traverse_obj(webpage_metadata, ('coverInfo', 'description'))),
'description': (self._og_search_description(webpage)
or traverse_obj(webpage_metadata, ('coverInfo', 'description'))),
'formats': formats,
'subtitles': subtitles,
'thumbnail': self._og_search_thumbnail(webpage),
'duration': int_or_none(traverse_obj(webpage_metadata, ('videoInfo', 'duration'))),
'series': traverse_obj(webpage_metadata, ('coverInfo', 'title')),
'episode_number': int_or_none(traverse_obj(webpage_metadata, ('videoInfo', 'episode'))),
}
class WeTvSeriesIE(WeTvBaseIE):
_VALID_URL = WeTvBaseIE._VALID_URL_BASE + r'/(?P<id>\w+)(?:-[^/?#]+)?/?(?:[?#]|$)'
_TESTS = [{
'url': 'https://wetv.vip/play/air11ooo2rdsdi3-Cute-Programmer',
'info_dict': {
'id': 'air11ooo2rdsdi3',
'title': 'Cute Programmer',
'description': 'md5:e87beab3bf9f392d6b9e541a63286343',
},
'playlist_count': 30,
}, {
'url': 'https://wetv.vip/en/play/u37kgfnfzs73kiu-You-Are-My-Glory',
'info_dict': {
'id': 'u37kgfnfzs73kiu',
'title': 'You Are My Glory',
'description': 'md5:831363a4c3b4d7615e1f3854be3a123b',
},
'playlist_count': 32,
}]
def _real_extract(self, url):
series_id = self._match_id(url)
webpage = self._download_webpage(url, series_id)
webpage_metadata = self._get_webpage_metadata(webpage, series_id)
episode_paths = (re.findall(r'<a[^>]+class="play-video__link"[^>]+href="(?P<path>[^"]+)', webpage)
or [f'/{series_id}/{episode["vid"]}' for episode in webpage_metadata.get('videoList')])
return self.playlist_from_matches(
episode_paths, series_id, ie=WeTvEpisodeIE, getter=functools.partial(urljoin, url),
title=traverse_obj(webpage_metadata, ('coverInfo', 'title')) or self._og_search_title(webpage),
description=traverse_obj(webpage_metadata, ('coverInfo', 'description')) or self._og_search_description(webpage))

View File

@ -1,55 +0,0 @@
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
get_element_by_class,
parse_qs,
remove_start,
unescapeHTML,
urljoin,
)
class WikimediaIE(InfoExtractor):
IE_NAME = 'wikimedia.org'
_VALID_URL = r'https?://commons\.wikimedia\.org/wiki/File:(?P<id>[^/#?]+)\.\w+'
_TESTS = [{
'url': 'https://commons.wikimedia.org/wiki/File:Die_Temperaturkurve_der_Erde_(ZDF,_Terra_X)_720p_HD_50FPS.webm',
'info_dict': {
'url': 're:https?://upload.wikimedia.org/wikipedia',
'ext': 'webm',
'id': 'Die_Temperaturkurve_der_Erde_(ZDF,_Terra_X)_720p_HD_50FPS',
'title': 'Die Temperaturkurve der Erde (ZDF, Terra X) 720p HD 50FPS.webm - Wikimedia Commons',
'description': 'md5:7cd84f76e7081f1be033d0b155b4a460',
'license': 'Creative Commons Attribution 4.0 International',
'uploader': 'ZDF/Terra X/Gruppe 5/Luise Wagner, Jonas Sichert, Andreas Hougardy',
'subtitles': 'count:4'
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
subtitles = {}
for sub in set(re.findall(r'\bsrc\s*=\s*["\'](/w/api[^"]+)["\']', webpage)):
sub = urljoin('https://commons.wikimedia.org', unescapeHTML(sub))
qs = parse_qs(sub)
lang = qs.get('lang', [None])[-1]
sub_ext = qs.get('trackformat', [None])[-1]
if lang and sub_ext:
subtitles.setdefault(lang, []).append({'ext': sub_ext, 'url': sub})
return {
'id': video_id,
'url': self._html_search_regex(r'<source\s[^>]*\bsrc="([^"]+)"', webpage, 'video URL'),
'description': clean_html(get_element_by_class('description', webpage)),
'title': remove_start(self._og_search_title(webpage), 'File:'),
'license': self._html_search_regex(
r'licensed under(?: the)? (.+?) license',
get_element_by_class('licensetpl', webpage), 'license', default=None),
'uploader': self._html_search_regex(
r'>\s*Author\s*</td>\s*<td\b[^>]*>\s*([^<]+)\s*</td>', webpage, 'video author', default=None),
'subtitles': subtitles,
}

View File

@ -1,5 +1,5 @@
# Autogenerated by devscripts/update-version.py
__version__ = '2022.07.18'
__version__ = '2022.06.29'
RELEASE_GIT_HEAD = '135f05ef6'
RELEASE_GIT_HEAD = '9d339c41e'