Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-15 13:43:04 +00:00)

Comparing a904a7f8c6...e3e606de12 (2 commits):

- e3e606de12
- 88f60feb32
README.md

@@ -1161,14 +1161,11 @@ Note that options in configuration file are just the same options aka switches used in regular command line calls
 
 You can use `--ignore-config` if you want to disable all configuration files for a particular yt-dlp run. If `--ignore-config` is found inside any configuration file, no further configuration will be loaded. For example, having the option in the portable configuration file prevents loading of home, user, and system configurations. Additionally, (for backward compatibility) if `--ignore-config` is found inside the system configuration file, the user configuration is not loaded.
 
-### Specifying encoding of config files
+### Config file encoding
 
-By default, config files are read in the encoding from system locale.
-If you saved your config file in a different encoding than that, you may write `# coding: ENCODING` to the beginning of the file. (e.g. `# coding: shift-jis`)
+The config files are decoded according to the UTF BOM if present, and in the encoding from system locale otherwise.
 
-There must not be any characters before that, including spaces.
-If you have BOM enabled, it will be used instead.
+If you want your file to be decoded differently, add `# coding: ENCODING` to the beginning of the file (e.g. `# coding: shift-jis`). There must be no characters before that, even spaces or BOM.
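A quick illustration of that lookup order, as a sketch using `determine_file_encoding` from `yt_dlp.utils` (its implementation and tests are diffed below; the byte strings here are our own examples):

```python
from yt_dlp.utils import determine_file_encoding

# A UTF BOM wins even when a '# coding:' declaration is also present,
# and only the BOM bytes are skipped when reading
assert determine_file_encoding(b'\xff\xfe# coding: utf-8\n--verbose') == ('utf-16-le', 2)

# Without a BOM, the declaration decides; nothing may precede it
assert determine_file_encoding(b'# coding: shift-jis\n--no-mtime') == ('shift-jis', 0)

# With neither, None tells the caller to fall back to the system locale
assert determine_file_encoding(b'--verbose\n') == (None, 0)
```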
 
 ### Authentication with `.netrc` file
test/test_utils.py

@@ -1831,24 +1831,16 @@ Line 1
         self.assertEqual(determine_file_encoding(b'\x00\x00\xfe\xff'), ('utf-32-be', 4))
         self.assertEqual(determine_file_encoding(b'\xff\xfe'), ('utf-16-le', 2))
 
-        self.assertEqual(determine_file_encoding(b'# -*- coding: cp932 -*-'), ('cp932', 0))
-        self.assertEqual(determine_file_encoding(b'# -*- coding: cp932 -*-\n'), ('cp932', 0))
-        self.assertEqual(determine_file_encoding(b'# -*- coding: cp932 -*-\r\n'), ('cp932', 0))
+        self.assertEqual(determine_file_encoding(b'\xff\xfe# coding: utf-8\n--verbose'), ('utf-16-le', 2))
 
         self.assertEqual(determine_file_encoding(b'# coding: utf-8\n--verbose'), ('utf-8', 0))
         self.assertEqual(determine_file_encoding(b'# coding: someencodinghere-12345\n--verbose'), ('someencodinghere-12345', 0))
 
-        self.assertEqual(determine_file_encoding(b'# vi: set fileencoding=cp932'), ('cp932', 0))
-        self.assertEqual(determine_file_encoding(b'# vi: set fileencoding=cp932\n'), ('cp932', 0))
-        self.assertEqual(determine_file_encoding(b'# vi: set fileencoding=cp932\r\n'), ('cp932', 0))
-        self.assertEqual(determine_file_encoding(b'# vi: set fileencoding=cp932,euc-jp\r\n'), ('cp932', 0))
+        self.assertEqual(determine_file_encoding(b'#coding:utf-8\n--verbose'), ('utf-8', 0))
+        self.assertEqual(determine_file_encoding(b'# coding: utf-8 \r\n--verbose'), ('utf-8', 0))
 
-        self.assertEqual(determine_file_encoding(
-            b'\0\0\0#\0\0\0 \0\0\0c\0\0\0o\0\0\0d\0\0\0i\0\0\0n\0\0\0g\0\0\0:\0\0\0 \0\0\0u\0\0\0t\0\0\0f\0\0\0-\0\0\x003\0\0\x002\0\0\0-\0\0\0b\0\0\0e'),
-            ('utf-32-be', 0))
-        self.assertEqual(determine_file_encoding(
-            b'#\0 \0c\0o\0d\0i\0n\0g\0:\0 \0u\0t\0f\0-\x001\x006\0-\0l\0e\0'),
-            ('utf-16-le', 0))
+        self.assertEqual(determine_file_encoding('# coding: utf-32-be'.encode('utf-32-be')), ('utf-32-be', 0))
+        self.assertEqual(determine_file_encoding('# coding: utf-16-le'.encode('utf-16-le')), ('utf-16-le', 0))
 
 
 if __name__ == '__main__':
yt_dlp/extractor/instagram.py

@@ -1,17 +1,17 @@
-import itertools
 import hashlib
+import itertools
 import json
 import re
 import time
+import urllib.error
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-)
 from ..utils import (
     ExtractorError,
-    format_field,
+    decode_base_n,
+    encode_base_n,
     float_or_none,
     format_field,
     get_element_by_attribute,
     int_or_none,
     lowercase_escape,
@@ -22,6 +22,18 @@ from ..utils import (
     urlencode_postdata,
 )
 
+_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
+
+
+def _pk_to_id(id):
+    """Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id"""
+    return encode_base_n(int(id.split('_')[0]), table=_ENCODING_CHARS)
+
+
+def _id_to_pk(shortcode):
+    """Convert a shortcode to a numeric value"""
+    return decode_base_n(shortcode[:11], table=_ENCODING_CHARS)
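The two new helpers above delegate to yt-dlp's `encode_base_n`/`decode_base_n`. A dependency-free sketch of the same base-64 mapping (the names `pk_to_shortcode`/`shortcode_to_pk` are ours; the encoding loop mirrors the old `_get_id` removed further down):

```python
_ENCODING_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'


def pk_to_shortcode(pk: int) -> str:
    # Peel off base-64 digits, least significant first
    shortcode = ''
    while pk > 0:
        pk, r = divmod(pk, 64)
        shortcode = _ENCODING_CHARS[r] + shortcode
    return shortcode


def shortcode_to_pk(shortcode: str) -> int:
    # Only the first 11 characters are significant, matching _id_to_pk above
    pk = 0
    for char in shortcode[:11]:
        pk = pk * 64 + _ENCODING_CHARS.index(char)
    return pk


# Round trip with an arbitrary made-up media pk
assert shortcode_to_pk(pk_to_shortcode(2345678901234567890)) == 2345678901234567890
```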
 
 
 class InstagramBaseIE(InfoExtractor):
     _NETRC_MACHINE = 'instagram'
@@ -156,6 +168,15 @@ class InstagramBaseIE(InfoExtractor):
         if isinstance(product_info, list):
             product_info = product_info[0]
 
+        comment_data = traverse_obj(product_info, ('edge_media_to_parent_comment', 'edges'))
+        comments = [{
+            'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
+            'author_id': traverse_obj(comment_dict, ('node', 'owner', 'id')),
+            'id': traverse_obj(comment_dict, ('node', 'id')),
+            'text': traverse_obj(comment_dict, ('node', 'text')),
+            'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
+        } for comment_dict in comment_data] if comment_data else None
+
         user_info = product_info.get('user') or {}
         info_dict = {
             'id': product_info.get('code') or product_info.get('id'),
@@ -168,6 +189,7 @@ class InstagramBaseIE(InfoExtractor):
             'view_count': int_or_none(product_info.get('view_count')),
             'like_count': int_or_none(product_info.get('like_count')),
             'comment_count': int_or_none(product_info.get('comment_count')),
+            'comments': comments,
             'http_headers': {
                 'Referer': 'https://www.instagram.com/',
             }
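The comment mapping added above walks standard GraphQL edge/node pairs; with made-up data shaped like Instagram's response, each edge flattens to one dict (a sketch, not real API output):

```python
from yt_dlp.utils import int_or_none, traverse_obj

product_info = {'edge_media_to_parent_comment': {'edges': [
    {'node': {'id': '1', 'text': 'Nice!', 'created_at': 1660000000,
              'owner': {'id': '99', 'username': 'someone'}}},
]}}

comment_data = traverse_obj(product_info, ('edge_media_to_parent_comment', 'edges'))
comments = [{
    'author': traverse_obj(comment_dict, ('node', 'owner', 'username')),
    'text': traverse_obj(comment_dict, ('node', 'text')),
    'timestamp': traverse_obj(comment_dict, ('node', 'created_at'), expected_type=int_or_none),
} for comment_dict in comment_data] if comment_data else None

assert comments == [{'author': 'someone', 'text': 'Nice!', 'timestamp': 1660000000}]
```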
@@ -214,23 +236,9 @@ class InstagramIOSIE(InfoExtractor):
         'add_ie': ['Instagram']
     }]
 
-    def _get_id(self, id):
-        """Source: https://stackoverflow.com/questions/24437823/getting-instagram-post-url-from-media-id"""
-        chrs = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
-        media_id = int(id.split('_')[0])
-        shortened_id = ''
-        while media_id > 0:
-            r = media_id % 64
-            media_id = (media_id - r) // 64
-            shortened_id = chrs[r] + shortened_id
-        return shortened_id
-
     def _real_extract(self, url):
-        return {
-            '_type': 'url_transparent',
-            'url': f'http://instagram.com/tv/{self._get_id(self._match_id(url))}/',
-            'ie_key': 'Instagram',
-        }
+        video_id = _pk_to_id(self._match_id(url))
+        return self.url_result(f'http://instagram.com/tv/{video_id}', InstagramIE, video_id)
 
 
 class InstagramIE(InstagramBaseIE):
@@ -358,39 +366,49 @@ class InstagramIE(InstagramBaseIE):
 
     def _real_extract(self, url):
         video_id, url = self._match_valid_url(url).group('id', 'url')
-        webpage, urlh = self._download_webpage_handle(url, video_id)
-        if 'www.instagram.com/accounts/login' in urlh.geturl():
-            self.report_warning('Main webpage is locked behind the login page. '
-                                'Retrying with embed webpage (Note that some metadata might be missing)')
-            webpage = self._download_webpage(
-                'https://www.instagram.com/p/%s/embed/' % video_id, video_id, note='Downloading embed webpage')
-
-        shared_data = self._parse_json(
-            self._search_regex(
-                r'window\._sharedData\s*=\s*({.+?});',
-                webpage, 'shared data', default='{}'),
-            video_id, fatal=False)
-        media = traverse_obj(
-            shared_data,
-            ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
-            ('entry_data', 'PostPage', 0, 'media'),
-            expected_type=dict)
-
-        # _sharedData.entry_data.PostPage is empty when authenticated (see
-        # https://github.com/ytdl-org/youtube-dl/pull/22880)
+        general_info = self._download_json(
+            f'https://www.instagram.com/graphql/query/?query_hash=9f8827793ef34641b2fb195d4d41151c'
+            f'&variables=%7B"shortcode":"{video_id}",'
+            '"parent_comment_count":10,"has_threaded_comments":true}', video_id, fatal=False, errnote=False,
+            headers={
+                'Accept': '*',
+                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
+                'Authority': 'www.instagram.com',
+                'Referer': 'https://www.instagram.com',
+                'x-ig-app-id': '936619743392459',
+            })
+        media = traverse_obj(general_info, ('data', 'shortcode_media')) or {}
         if not media:
-            additional_data = self._parse_json(
-                self._search_regex(
-                    r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*({.+?})\s*\);',
-                    webpage, 'additional data', default='{}'),
-                video_id, fatal=False)
-            product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
-            if product_item:
-                return self._extract_product(product_item)
-            media = traverse_obj(additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {}
+            self.report_warning('General metadata extraction failed', video_id)
 
-        if not media and 'www.instagram.com/accounts/login' in urlh.geturl():
-            self.raise_login_required('You need to log in to access this content')
+            info = self._download_json(
+                f'https://i.instagram.com/api/v1/media/{_id_to_pk(video_id)}/info/', video_id,
+                fatal=False, note='Downloading video info', errnote=False, headers={
+                    'Accept': '*',
+                    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
+                    'Authority': 'www.instagram.com',
+                    'Referer': 'https://www.instagram.com',
+                    'x-ig-app-id': '936619743392459',
+                })
+            if info:
+                media.update(info['items'][0])
+                return self._extract_product(media)
+
+            webpage = self._download_webpage(
+                f'https://www.instagram.com/p/{video_id}/embed/', video_id,
+                note='Downloading embed webpage', fatal=False)
+            if not webpage:
+                self.raise_login_required('Requested content was not found, the content might be private')
+
+            additional_data = self._search_json(
+                r'window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*', webpage, 'additional data', video_id, fatal=False)
+            product_item = traverse_obj(additional_data, ('items', 0), expected_type=dict)
+            if product_item:
+                media.update(product_item)
+                return self._extract_product(media)
+
+            media.update(traverse_obj(
+                additional_data, ('graphql', 'shortcode_media'), 'shortcode_media', expected_type=dict) or {})
 
         username = traverse_obj(media, ('owner', 'username')) or self._search_regex(
             r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, 'username', fatal=False)
@@ -519,7 +537,7 @@ class InstagramPlaylistBaseIE(InstagramBaseIE):
             except ExtractorError as e:
                 # if it's an error caused by a bad query, and there are
                 # more GIS templates to try, ignore it and keep trying
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+                if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
                     if gis_tmpl != gis_tmpls[-1]:
                         continue
                     raise
@@ -629,41 +647,36 @@ class InstagramStoryIE(InstagramBaseIE):
 
     def _real_extract(self, url):
         username, story_id = self._match_valid_url(url).groups()
-
-        story_info_url = f'{username}/{story_id}/?__a=1' if username == 'highlights' else f'{username}/?__a=1'
-        story_info = self._download_json(f'https://www.instagram.com/stories/{story_info_url}', story_id, headers={
-            'X-IG-App-ID': 936619743392459,
-            'X-ASBD-ID': 198387,
-            'X-IG-WWW-Claim': 0,
-            'X-Requested-With': 'XMLHttpRequest',
-            'Referer': url,
-        })
-        user_id = story_info['user']['id']
-        highlight_title = traverse_obj(story_info, ('highlight', 'title'))
+        story_info = self._download_webpage(url, story_id)
+        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
+        if not user_info:
+            self.raise_login_required('This content is unreachable')
+        user_id = user_info.get('id')
 
         story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
-        videos = self._download_json(f'https://i.instagram.com/api/v1/feed/reels_media/?reel_ids={story_info_url}', story_id, headers={
-            'X-IG-App-ID': 936619743392459,
-            'X-ASBD-ID': 198387,
-            'X-IG-WWW-Claim': 0,
-        })['reels']
+        videos = traverse_obj(self._download_json(
+            f'https://i.instagram.com/api/v1/feed/reels_media/?reel_ids={story_info_url}',
+            story_id, errnote=False, fatal=False, headers={
+                'X-IG-App-ID': 936619743392459,
+                'X-ASBD-ID': 198387,
+                'X-IG-WWW-Claim': 0,
+            }), 'reels')
+        if not videos:
+            self.raise_login_required('You need to log in to access this content')
 
-        full_name = traverse_obj(videos, ('user', 'full_name'))
-
-        user_info = {}
-        if not (username and username != 'highlights' and full_name):
-            user_info = self._download_json(
-                f'https://i.instagram.com/api/v1/users/{user_id}/info/', story_id, headers={
-                    'User-Agent': 'Mozilla/5.0 (Linux; Android 11; SM-A505F Build/RP1A.200720.012; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/96.0.4664.45 Mobile Safari/537.36 Instagram 214.1.0.29.120 Android (30/11; 450dpi; 1080x2122; samsung; SM-A505F; a50; exynos9610; en_US; 333717274)',
-                }, note='Downloading user info')
-
-        username = traverse_obj(user_info, ('user', 'username')) or username
-        full_name = traverse_obj(user_info, ('user', 'full_name')) or full_name
+        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (str(user_id), 'user', 'full_name'))
+        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
+        if not story_title:
+            story_title = f'Story by {username}'
 
         highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
-        return self.playlist_result([{
-            **self._extract_product(highlight),
-            'title': f'Story by {username}',
-            'uploader': full_name,
-            'uploader_id': user_id,
-        } for highlight in highlights], playlist_id=story_id, playlist_title=highlight_title)
+        info_data = []
+        for highlight in highlights:
+            highlight_data = self._extract_product(highlight)
+            if highlight_data.get('formats'):
+                info_data.append({
+                    **highlight_data,
+                    'uploader': full_name,
+                    'uploader_id': user_id,
+                })
+        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)
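The `reels_media` lookups above work because the API keys each reel by either `highlight:<story id>` or the numeric user id; schematically (fields trimmed to the ones used, values made up):

```python
from yt_dlp.utils import traverse_obj

videos = {  # what traverse_obj(..., 'reels') is assumed to return
    'highlight:17900000000000000': {
        'title': 'Trip',
        'user': {'full_name': 'Jane Doe'},
        'items': [{'pk': '2900000000000000000', 'code': 'abcDEF12345'}],
    },
}

story_id, user_id = '17900000000000000', '4200000000'
highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
assert highlights == [{'pk': '2900000000000000000', 'code': 'abcDEF12345'}]
```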
yt_dlp/utils.py

@@ -3485,6 +3485,7 @@ def age_restricted(content_limit, age_limit):
     return age_limit < content_limit
 
 
+# List of known byte-order-marks (BOM)
 BOMS = [
     (b'\xef\xbb\xbf', 'utf-8'),
     (b'\x00\x00\xfe\xff', 'utf-32-be'),
@@ -3492,7 +3493,6 @@ BOMS = [
     (b'\xff\xfe', 'utf-16-le'),
     (b'\xfe\xff', 'utf-16-be'),
 ]
-""" List of known byte-order-marks (BOM) """
 
 
 def is_html(first_bytes):
@@ -5398,37 +5398,20 @@ def read_stdin(what):
 
 def determine_file_encoding(data):
     """
-    From the first 512 bytes of a given file,
-    it tries to detect the encoding to be used to read as text.
-
+    Detect the text encoding used
     @returns (encoding, bytes to skip)
     """
 
+    # BOM marks are given priority over declarations
     for bom, enc in BOMS:
-        # matching BOM beats any declaration
-        # BOMs are skipped to prevent any errors
         if data.startswith(bom):
             return enc, len(bom)
 
-    # strip off all null bytes to match even when UTF-16 or UTF-32 is used
-    # endians don't matter
+    # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
+    # We ignore the endianness to get a good enough match
    data = data.replace(b'\0', b'')
-
-    PREAMBLES = [
-        # "# -*- coding: utf-8 -*-"
-        # "# coding: utf-8"
-        rb'(?m)^#(?:\s+-\*-)?\s*coding\s*:\s*(?P<encoding>\S+)(?:\s+-\*-)?\s*$',
-        # "# vi: set fileencoding=utf-8"
-        rb'^#\s+vi\s*:\s+set\s+fileencoding=(?P<encoding>[^\s,]+)'
-    ]
-    for pb in PREAMBLES:
-        mobj = re.match(pb, data)
-        if not mobj:
-            continue
-        # preambles aren't skipped since they're just ignored when reading as config
-        return mobj.group('encoding').decode(), 0
-
-    return None, 0
+    mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
+    return mobj.group(1).decode() if mobj else None, 0
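A minimal sketch of how a caller can apply the returned `(encoding, skip)` pair when reading a config file (the helper name `read_config_text` and the utf-8 fallback are our assumptions; yt-dlp itself falls back to the system locale, as the README change above states):

```python
def read_config_text(path: str, fallback: str = 'utf-8') -> str:
    with open(path, 'rb') as f:
        data = f.read()
    # Only the first 512 bytes are inspected
    enc, skip = determine_file_encoding(data[:512])
    # skip is non-zero only for a BOM; a '# coding:' line is left in place,
    # since config parsing treats it as an ordinary comment
    return data[skip:].decode(enc or fallback)
```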
 
 
 class Config: