Compare commits


4 Commits

Author       SHA1        Message                                                         Date
HobbyistDev  67685a541d  [extractor/tempo] Add extractor (#4463)                         2022-07-27 17:18:42 +05:30
pukkandan    964b5493a4  Bugfix for f1042989c1                                           2022-07-27 17:13:04 +05:30
pukkandan    3955b20703  Fix bugs in 3bec830a59 (Closes #4454)                           2022-07-27 17:10:26 +05:30
pukkandan    f1042989c1  [crunchyroll] Fix language code in _VALID_URLs (Closes #4451)   2022-07-27 17:10:26 +05:30
4 changed files with 64 additions and 6 deletions

yt_dlp/YoutubeDL.py

@@ -1713,7 +1713,7 @@ class YoutubeDL:
         assert ie_result['_type'] in ('playlist', 'multi_video')

         common_info = self._playlist_infodict(ie_result, strict=True)
-        title = common_info.get('title') or '<Untitled>'
+        title = common_info.get('playlist') or '<Untitled>'
         if self._match_entry(common_info, incomplete=True) is not None:
             return
         self.to_screen(f'[download] Downloading {ie_result["_type"]}: {title}')
@@ -1733,8 +1733,8 @@ class YoutubeDL:
         # Better to do this after potentially exhausting entries
         ie_result['playlist_count'] = all_entries.get_full_count()

-        common_info = self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries))
-        ie_copy = collections.ChainMap(ie_result, common_info)
+        ie_copy = collections.ChainMap(
+            ie_result, self._playlist_infodict(ie_result, n_entries=int_or_none(n_entries)))

         _infojson_written = False
         write_playlist_files = self.params.get('allow_playlist_files', True)
@@ -1782,6 +1782,7 @@ class YoutubeDL:
             extra = {
                 **common_info,
                 'n_entries': int_or_none(n_entries),
+                'playlist_index': playlist_index,
                 'playlist_autonumber': i + 1,
             }
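
Taken together, the three hunks above are the playlist bugfix (3955b20703): _playlist_infodict() exposes the playlist's title under the 'playlist' key rather than 'title', the non-strict infodict no longer overwrites common_info (so the per-entry extra keeps the strict version), and playlist_index is restored to the per-entry fields. A minimal sketch of the collections.ChainMap behaviour the fixed code relies on, using toy dicts rather than real infodicts:

import collections

# First mapping wins on key collisions; lookups fall through otherwise.
ie_result = {'title': 'Some Playlist', 'entries': []}
common_info = {'playlist': 'Some Playlist', 'playlist_index': None}

ie_copy = collections.ChainMap(ie_result, common_info)
print(ie_copy['title'])           # 'Some Playlist', served from ie_result
print(ie_copy['playlist_index'])  # None, falls through to common_info

# The first hunk's point: the playlist title sits under 'playlist'
print(common_info.get('title'))     # None (the old, buggy lookup)
print(common_info.get('playlist'))  # 'Some Playlist'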

yt_dlp/extractor/extractors.py

@@ -1727,6 +1727,7 @@ from .telequebec import (
 )
 from .teletask import TeleTaskIE
 from .telewebion import TelewebionIE
+from .tempo import TempoIE
 from .tennistv import TennisTVIE
 from .tenplay import TenPlayIE
 from .testurl import TestURLIE
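
Registering TempoIE here is what makes the new URL pattern routable; nothing else in core changes. A quick hedged check against the test URL from the new extractor (suitable() and _match_id() are inherited from InfoExtractor):

from yt_dlp.extractor.tempo import TempoIE

url = 'https://video.tempo.co/read/30058/anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki'
print(TempoIE.suitable(url))   # True: url matches _VALID_URL
print(TempoIE._match_id(url))  # 'anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki'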

yt_dlp/extractor/crunchyroll.py

@@ -649,7 +649,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
 class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
     IE_NAME = 'crunchyroll:playlist'
-    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:\w{1,2}/)?(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'
+    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:\w{2}(?:-\w{2})?/)?(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login|media-\d+))(?P<id>[\w\-]+))/?(?:\?|$)'

     _TESTS = [{
         'url': 'https://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
@@ -757,7 +757,7 @@ class CrunchyrollBetaBaseIE(CrunchyrollBaseIE):
 class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
     IE_NAME = 'crunchyroll:beta'
-    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)watch/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{2}(?:-\w{2})?/)?)watch/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
     _TESTS = [{
         'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/to-the-future',
         'info_dict': {
@@ -801,6 +801,9 @@ class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
     }, {
         'url': 'https://beta.crunchyroll.com/watch/GY2P1Q98Y/',
         'only_matching': True,
+    }, {
+        'url': 'https://beta.crunchyroll.com/pt-br/watch/G8WUN8VKP/the-ruler-of-conspiracy',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -880,7 +883,7 @@ class CrunchyrollBetaIE(CrunchyrollBetaBaseIE):
 class CrunchyrollBetaShowIE(CrunchyrollBetaBaseIE):
     IE_NAME = 'crunchyroll:playlist:beta'
-    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)series/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
+    _VALID_URL = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{2}(?:-\w{2})?/)?)series/(?P<id>\w+)/(?P<display_id>[\w\-]*)/?(?:\?|$)'
     _TESTS = [{
         'url': 'https://beta.crunchyroll.com/series/GY19NQ2QR/Girl-Friend-BETA',
         'info_dict': {
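
All three _VALID_URL changes are the same edit: the language prefix (?:\w{1,2}/)? becomes (?:\w{2}(?:-\w{2})?/)?, i.e. a two-letter code optionally followed by a two-letter region, as in the new pt-br test above. A small sketch of the difference (patterns copied from the hunks, trimmed to the watch route):

import re

OLD = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{1,2}/)?)watch/(?P<id>\w+)'
NEW = r'https?://beta\.crunchyroll\.com/(?P<lang>(?:\w{2}(?:-\w{2})?/)?)watch/(?P<id>\w+)'

url = 'https://beta.crunchyroll.com/pt-br/watch/G8WUN8VKP/the-ruler-of-conspiracy'
print(bool(re.match(OLD, url)))          # False: 'pt-br/' does not fit \w{1,2}/
print(re.match(NEW, url).group('lang'))  # 'pt-br/'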

yt_dlp/extractor/tempo.py (new file, 53 lines)

@@ -0,0 +1,53 @@
from .common import InfoExtractor
from ..utils import int_or_none, parse_iso8601, str_or_none, traverse_obj


class TempoIE(InfoExtractor):
    _VALID_URL = r'https?://video\.tempo\.co/\w+/\d+/(?P<id>[\w-]+)'

    _TESTS = [{
        'url': 'https://video.tempo.co/read/30058/anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki',
        'info_dict': {
            'id': '2144438',
            'ext': 'mp4',
            'title': 'Anies Baswedan Ajukan Banding Putusan PTUN Batalkan UMP DKI',
            'display_id': 'anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki',
            'duration': 84,
            'description': 'md5:a6822b7c4c874fa7e5bd63e96a387b66',
            'thumbnail': 'https://statik.tempo.co/data/2022/07/27/id_1128287/1128287_720.jpg',
            'timestamp': 1658911277,
            'upload_date': '20220727',
            'tags': ['Anies Baswedan', ' PTUN', ' PTUN | Pengadilan Tata Usaha Negara', ' PTUN Batalkan UMP DKI', ' UMP DKI'],
        }
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        player_key, widget_id = self._search_regex(
            r'<ivs-player\s*[^>]+data-ivs-key\s*=\s*"(?P<player_key>[\w]+)[^>]+\bdata-ivs-wid="(?P<widget_id>[\w-]+)',
            webpage, 'player_key, widget_id', group=('player_key', 'widget_id'))

        json_ld_data = self._search_json_ld(webpage, display_id)
        json_data = self._download_json(
            f'https://ivxplayer.ivideosmart.com/prod/widget/{widget_id}',
            display_id, query={'key': player_key})

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            json_data['player']['video_url'], display_id, ext='mp4')

        return {
            'id': str(json_data['ivx']['id']),
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            'title': (self._html_search_meta('twitter:title', webpage) or self._og_search_title(webpage)
                      or traverse_obj(json_data, ('ivx', 'name'))),
            'duration': int_or_none(traverse_obj(json_data, ('ivx', 'duration'))),
            'thumbnail': (self._html_search_meta('twitter:image:src', webpage) or self._og_search_thumbnail(webpage)
                          or traverse_obj(json_data, ('ivx', 'thumbnail_url'))),
            'description': (json_ld_data.get('description') or self._html_search_meta(['description', 'twitter:description'], webpage)
                            or self._og_search_description(webpage)),
            'timestamp': parse_iso8601(traverse_obj(json_data, ('ivx', 'created_at'))),
            'tags': str_or_none(self._html_search_meta('keywords', webpage), '').split(','),
        }
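
For completeness, a hedged usage sketch exercising the new extractor end to end through yt-dlp's embedding API (YoutubeDL and extract_info are the public entry points; skip_download avoids fetching any media):

import yt_dlp

url = 'https://video.tempo.co/read/30058/anies-baswedan-ajukan-banding-putusan-ptun-batalkan-ump-dki'
with yt_dlp.YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info(url, download=False)
    print(info['id'], info['duration'], info['title'])  # per the test: 2144438, 84, 'Anies Baswedan ...'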