Initial Commit

Brett Woodruff
2024-06-10 12:24:37 -04:00
commit 106024bcb4
2223 changed files with 241071 additions and 0 deletions


@@ -0,0 +1,139 @@
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import os
import re
__version__ = '1.34.39'
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
_INITIALIZERS = []
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# The regex below handles the special case where some acronym
# name is pluralized, e.g. GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{2,}s$')
# Prepopulate the cache with special cases that don't match
# our regular transformation.
_xform_cache = {
('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
('ExecutePartiQLStatement', '_'): 'execute_partiql_statement',
('ExecutePartiQLStatement', '-'): 'execute-partiql-statement',
('ExecutePartiQLTransaction', '_'): 'execute_partiql_transaction',
('ExecutePartiQLTransaction', '-'): 'execute-partiql-transaction',
('ExecutePartiQLBatch', '_'): 'execute_partiql_batch',
('ExecutePartiQLBatch', '-'): 'execute-partiql-batch',
}
# The items in this dict represent partial renames to apply globally to all
# services which might have a matching argument or operation. This way a
# common mis-translation can be fixed without having to call out each
# individual case.
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))
# Used to specify anonymous (unsigned) request signature
class UNSIGNED:
def __copy__(self):
return self
def __deepcopy__(self, memodict):
return self
UNSIGNED = UNSIGNED()
def xform_name(name, sep='_', _xform_cache=_xform_cache):
"""Convert camel case to a "pythonic" name.
If the name contains the ``sep`` character, then it is
returned unchanged.
"""
if sep in name:
# If the sep is in the name, assume that it's already
# transformed and return the string unchanged.
return name
key = (name, sep)
if key not in _xform_cache:
if _special_case_transform.search(name) is not None:
is_special = _special_case_transform.search(name)
matched = is_special.group()
# Replace something like ARNs, ACLs with _arns, _acls.
name = f"{name[: -len(matched)]}{sep}{matched.lower()}"
s1 = _first_cap_regex.sub(r'\1' + sep + r'\2', name)
transformed = _end_cap_regex.sub(r'\1' + sep + r'\2', s1).lower()
_xform_cache[key] = transformed
return _xform_cache[key]
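# Illustrative conversions for xform_name (the iSCSI case comes from the
# prepopulated _xform_cache above; the ACLs case exercises
# _special_case_transform):
#
#     >>> xform_name('DescribeInstances')
#     'describe_instances'
#     >>> xform_name('ListWebACLs')
#     'list_web_acls'
#     >>> xform_name('CreateCachediSCSIVolume')
#     'create_cached_iscsi_volume'
#     >>> xform_name('already_transformed')  # contains the sep, returned unchanged
#     'already_transformed'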
def register_initializer(callback):
"""Register an initializer function for session creation.
This initializer function will be invoked whenever a new
`botocore.session.Session` is instantiated.
:type callback: callable
:param callback: A callable that accepts a single argument
of type `botocore.session.Session`.
"""
_INITIALIZERS.append(callback)
def unregister_initializer(callback):
"""Unregister an initializer function.
:type callback: callable
:param callback: A callable that was previously registered
with `botocore.register_initializer`.
:raises ValueError: If a callback is provided that is not currently
registered as an initializer.
"""
_INITIALIZERS.remove(callback)
def invoke_initializers(session):
"""Invoke all initializers for a session.
:type session: botocore.session.Session
:param session: The session to initialize.
"""
for initializer in _INITIALIZERS:
initializer(session)
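# Minimal sketch of the initializer hooks above; the callback name is
# hypothetical, but botocore.session.Session() runs every registered callback
# via invoke_initializers() during construction:
#
#     >>> import botocore
#     >>> import botocore.session
#     >>> def _announce(session):
#     ...     print('new session:', type(session).__name__)
#     >>> botocore.register_initializer(_announce)
#     >>> session = botocore.session.Session()
#     new session: Session
#     >>> botocore.unregister_initializer(_announce)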


@@ -0,0 +1,769 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Internal module to help with normalizing botocore client args.
This module (and all function/classes within this module) should be
considered internal, and *not* a public API.
"""
import copy
import logging
import socket
import botocore.exceptions
import botocore.parsers
import botocore.serialize
from botocore.config import Config
from botocore.endpoint import EndpointCreator
from botocore.regions import EndpointResolverBuiltins as EPRBuiltins
from botocore.regions import EndpointRulesetResolver
from botocore.signers import RequestSigner
from botocore.useragent import UserAgentString
from botocore.utils import ensure_boolean, is_s3_accelerate_url
logger = logging.getLogger(__name__)
VALID_REGIONAL_ENDPOINTS_CONFIG = [
'legacy',
'regional',
]
LEGACY_GLOBAL_STS_REGIONS = [
'ap-northeast-1',
'ap-south-1',
'ap-southeast-1',
'ap-southeast-2',
'aws-global',
'ca-central-1',
'eu-central-1',
'eu-north-1',
'eu-west-1',
'eu-west-2',
'eu-west-3',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
]
# Maximum allowed length of the ``user_agent_appid`` config field. Longer
# values result in a warning-level log message.
USERAGENT_APPID_MAXLEN = 50
class ClientArgsCreator:
def __init__(
self,
event_emitter,
user_agent,
response_parser_factory,
loader,
exceptions_factory,
config_store,
user_agent_creator=None,
):
self._event_emitter = event_emitter
self._response_parser_factory = response_parser_factory
self._loader = loader
self._exceptions_factory = exceptions_factory
self._config_store = config_store
if user_agent_creator is None:
self._session_ua_creator = UserAgentString.from_environment()
else:
self._session_ua_creator = user_agent_creator
def get_client_args(
self,
service_model,
region_name,
is_secure,
endpoint_url,
verify,
credentials,
scoped_config,
client_config,
endpoint_bridge,
auth_token=None,
endpoints_ruleset_data=None,
partition_data=None,
):
final_args = self.compute_client_args(
service_model,
client_config,
endpoint_bridge,
region_name,
endpoint_url,
is_secure,
scoped_config,
)
service_name = final_args['service_name'] # noqa
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
configured_endpoint_url = final_args['configured_endpoint_url']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = RequestSigner(
service_model.service_id,
signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials,
event_emitter,
auth_token,
)
config_kwargs['s3'] = s3_config
new_config = Config(**config_kwargs)
endpoint_creator = EndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model,
region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'],
verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
proxies_config=new_config.proxies_config,
)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation
)
response_parser = botocore.parsers.create_parser(protocol)
ruleset_resolver = self._build_endpoint_resolver(
endpoints_ruleset_data,
partition_data,
client_config,
service_model,
endpoint_region_name,
region_name,
configured_endpoint_url,
endpoint,
is_secure,
endpoint_bridge,
event_emitter,
)
# Copy the session's user agent factory and add client configuration.
client_ua_creator = self._session_ua_creator.with_client_config(
new_config
)
supplied_ua = client_config.user_agent if client_config else None
new_config._supplied_user_agent = supplied_ua
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory,
'endpoint_ruleset_resolver': ruleset_resolver,
'user_agent_creator': client_ua_creator,
}
def compute_client_args(
self,
service_model,
client_config,
endpoint_bridge,
region_name,
endpoint_url,
is_secure,
scoped_config,
):
service_name = service_model.endpoint_prefix
protocol = service_model.metadata['protocol']
parameter_validation = True
if client_config and not client_config.parameter_validation:
parameter_validation = False
elif scoped_config:
raw_value = scoped_config.get('parameter_validation')
if raw_value is not None:
parameter_validation = ensure_boolean(raw_value)
s3_config = self.compute_s3_config(client_config)
configured_endpoint_url = self._compute_configured_endpoint_url(
client_config=client_config,
endpoint_url=endpoint_url,
)
endpoint_config = self._compute_endpoint_config(
service_name=service_name,
region_name=region_name,
endpoint_url=configured_endpoint_url,
is_secure=is_secure,
endpoint_bridge=endpoint_bridge,
s3_config=s3_config,
)
endpoint_variant_tags = endpoint_config['metadata'].get('tags', [])
# Some third-party libraries expect the final user-agent string in
# ``client.meta.config.user_agent``. To maintain backwards
# compatibility, the preliminary user-agent string (before any Config
# object modifications and without request-specific user-agent
# components) is stored in the new Config object's ``user_agent``
# property but not used by Botocore itself.
preliminary_ua_string = self._session_ua_creator.with_client_config(
client_config
).to_string()
# Create a new client config to be passed to the client based
# on the final values. We do not want the user to be able
# to try to modify an existing client with a client config.
config_kwargs = dict(
region_name=endpoint_config['region_name'],
signature_version=endpoint_config['signature_version'],
user_agent=preliminary_ua_string,
)
if 'dualstack' in endpoint_variant_tags:
config_kwargs.update(use_dualstack_endpoint=True)
if 'fips' in endpoint_variant_tags:
config_kwargs.update(use_fips_endpoint=True)
if client_config is not None:
config_kwargs.update(
connect_timeout=client_config.connect_timeout,
read_timeout=client_config.read_timeout,
max_pool_connections=client_config.max_pool_connections,
proxies=client_config.proxies,
proxies_config=client_config.proxies_config,
retries=client_config.retries,
client_cert=client_config.client_cert,
inject_host_prefix=client_config.inject_host_prefix,
tcp_keepalive=client_config.tcp_keepalive,
user_agent_extra=client_config.user_agent_extra,
user_agent_appid=client_config.user_agent_appid,
request_min_compression_size_bytes=(
client_config.request_min_compression_size_bytes
),
disable_request_compression=(
client_config.disable_request_compression
),
client_context_params=client_config.client_context_params,
)
self._compute_retry_config(config_kwargs)
self._compute_connect_timeout(config_kwargs)
self._compute_user_agent_appid_config(config_kwargs)
self._compute_request_compression_config(config_kwargs)
s3_config = self.compute_s3_config(client_config)
is_s3_service = self._is_s3_service(service_name)
if is_s3_service and 'dualstack' in endpoint_variant_tags:
if s3_config is None:
s3_config = {}
s3_config['use_dualstack_endpoint'] = True
return {
'service_name': service_name,
'parameter_validation': parameter_validation,
'configured_endpoint_url': configured_endpoint_url,
'endpoint_config': endpoint_config,
'protocol': protocol,
'config_kwargs': config_kwargs,
's3_config': s3_config,
'socket_options': self._compute_socket_options(
scoped_config, client_config
),
}
def _compute_configured_endpoint_url(self, client_config, endpoint_url):
if endpoint_url is not None:
return endpoint_url
if self._ignore_configured_endpoint_urls(client_config):
logger.debug("Ignoring configured endpoint URLs.")
return endpoint_url
return self._config_store.get_config_variable('endpoint_url')
def _ignore_configured_endpoint_urls(self, client_config):
if (
client_config
and client_config.ignore_configured_endpoint_urls is not None
):
return client_config.ignore_configured_endpoint_urls
return self._config_store.get_config_variable(
'ignore_configured_endpoint_urls'
)
def compute_s3_config(self, client_config):
s3_configuration = self._config_store.get_config_variable('s3')
# Next, specific client config values take precedence over
# specific values in the scoped config.
if client_config is not None:
if client_config.s3 is not None:
if s3_configuration is None:
s3_configuration = client_config.s3
else:
# The current s3_configuration dictionary may come from a
# source that should be treated as read-only, so to be safe
# we make a copy of it to modify before it actually gets
# updated.
s3_configuration = s3_configuration.copy()
s3_configuration.update(client_config.s3)
return s3_configuration
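# Illustrative merge for compute_s3_config: assuming the config store holds
# {'addressing_style': 'path'} (e.g. from the shared config file) and the
# client passed Config(s3={'use_accelerate_endpoint': True}), the result is a
# copy updated with the client values:
# {'addressing_style': 'path', 'use_accelerate_endpoint': True}.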
def _is_s3_service(self, service_name):
"""Whether the service is S3 or S3 Control.
Note that throughout this class, service_name refers to the endpoint
prefix, not the folder name of the service in botocore/data. For
S3 Control, the folder name is 's3control' but the endpoint prefix is
's3-control'.
"""
return service_name in ['s3', 's3-control']
def _compute_endpoint_config(
self,
service_name,
region_name,
endpoint_url,
is_secure,
endpoint_bridge,
s3_config,
):
resolve_endpoint_kwargs = {
'service_name': service_name,
'region_name': region_name,
'endpoint_url': endpoint_url,
'is_secure': is_secure,
'endpoint_bridge': endpoint_bridge,
}
if service_name == 's3':
return self._compute_s3_endpoint_config(
s3_config=s3_config, **resolve_endpoint_kwargs
)
if service_name == 'sts':
return self._compute_sts_endpoint_config(**resolve_endpoint_kwargs)
return self._resolve_endpoint(**resolve_endpoint_kwargs)
def _compute_s3_endpoint_config(
self, s3_config, **resolve_endpoint_kwargs
):
force_s3_global = self._should_force_s3_global(
resolve_endpoint_kwargs['region_name'], s3_config
)
if force_s3_global:
resolve_endpoint_kwargs['region_name'] = None
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
self._set_region_if_custom_s3_endpoint(
endpoint_config, resolve_endpoint_kwargs['endpoint_bridge']
)
# For backwards compatibility reasons, we want to make sure the
# client.meta.region_name will remain us-east-1 if we forced the
# endpoint to be the global region. Specifically, if this value
# changes to aws-global, it breaks logic where a user is checking
# for us-east-1 as the global endpoint such as in creating buckets.
if force_s3_global and endpoint_config['region_name'] == 'aws-global':
endpoint_config['region_name'] = 'us-east-1'
return endpoint_config
def _should_force_s3_global(self, region_name, s3_config):
s3_regional_config = 'legacy'
if s3_config and 'us_east_1_regional_endpoint' in s3_config:
s3_regional_config = s3_config['us_east_1_regional_endpoint']
self._validate_s3_regional_config(s3_regional_config)
is_global_region = region_name in ('us-east-1', None)
return s3_regional_config == 'legacy' and is_global_region
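# Worked examples for the check above, assuming the default configuration
# unless noted:
#
#     region_name='us-east-1', s3_config=None -> force the global endpoint
#     region_name=None,        s3_config=None -> force the global endpoint
#     region_name='us-east-1', s3_config={'us_east_1_regional_endpoint': 'regional'}
#                                             -> keep the regional endpoint
#     region_name='eu-west-1', s3_config=None -> keep the regional endpoint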
def _validate_s3_regional_config(self, config_val):
if config_val not in VALID_REGIONAL_ENDPOINTS_CONFIG:
raise botocore.exceptions.InvalidS3UsEast1RegionalEndpointConfigError(
s3_us_east_1_regional_endpoint_config=config_val
)
def _set_region_if_custom_s3_endpoint(
self, endpoint_config, endpoint_bridge
):
# If a user is providing a custom URL, the endpoint resolver will
# refuse to infer a signing region. If we want to default to s3v4,
# we have to account for this.
if (
endpoint_config['signing_region'] is None
and endpoint_config['region_name'] is None
):
endpoint = endpoint_bridge.resolve('s3')
endpoint_config['signing_region'] = endpoint['signing_region']
endpoint_config['region_name'] = endpoint['region_name']
def _compute_sts_endpoint_config(self, **resolve_endpoint_kwargs):
endpoint_config = self._resolve_endpoint(**resolve_endpoint_kwargs)
if self._should_set_global_sts_endpoint(
resolve_endpoint_kwargs['region_name'],
resolve_endpoint_kwargs['endpoint_url'],
endpoint_config,
):
self._set_global_sts_endpoint(
endpoint_config, resolve_endpoint_kwargs['is_secure']
)
return endpoint_config
def _should_set_global_sts_endpoint(
self, region_name, endpoint_url, endpoint_config
):
has_variant_tags = endpoint_config and endpoint_config.get(
'metadata', {}
).get('tags')
if endpoint_url or has_variant_tags:
return False
return (
self._get_sts_regional_endpoints_config() == 'legacy'
and region_name in LEGACY_GLOBAL_STS_REGIONS
)
def _get_sts_regional_endpoints_config(self):
sts_regional_endpoints_config = self._config_store.get_config_variable(
'sts_regional_endpoints'
)
if not sts_regional_endpoints_config:
sts_regional_endpoints_config = 'legacy'
if (
sts_regional_endpoints_config
not in VALID_REGIONAL_ENDPOINTS_CONFIG
):
raise botocore.exceptions.InvalidSTSRegionalEndpointsConfigError(
sts_regional_endpoints_config=sts_regional_endpoints_config
)
return sts_regional_endpoints_config
def _set_global_sts_endpoint(self, endpoint_config, is_secure):
scheme = 'https' if is_secure else 'http'
endpoint_config['endpoint_url'] = '%s://sts.amazonaws.com' % scheme
endpoint_config['signing_region'] = 'us-east-1'
def _resolve_endpoint(
self,
service_name,
region_name,
endpoint_url,
is_secure,
endpoint_bridge,
):
return endpoint_bridge.resolve(
service_name, region_name, endpoint_url, is_secure
)
def _compute_socket_options(self, scoped_config, client_config=None):
# These are the default socket options in urllib3; TCP_NODELAY
# disables Nagle's algorithm.
socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
client_keepalive = client_config and client_config.tcp_keepalive
scoped_keepalive = scoped_config and self._ensure_boolean(
scoped_config.get("tcp_keepalive", False)
)
# Enables TCP Keepalive if specified in client config object or shared config file.
if client_keepalive or scoped_keepalive:
socket_options.append((socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1))
return socket_options
def _compute_retry_config(self, config_kwargs):
self._compute_retry_max_attempts(config_kwargs)
self._compute_retry_mode(config_kwargs)
def _compute_retry_max_attempts(self, config_kwargs):
# There's a pre-existing max_attempts client config value that actually
# means max *retry* attempts. There's also a `max_attempts` we pull
# from the config store that means *total attempts*, which includes the
# initial request. We can't change what `max_attempts` means in
# client config so we try to normalize everything to a new
# "total_max_attempts" variable. We ensure that after this, the only
# configuration for "max attempts" is the 'total_max_attempts' key.
# An explicitly provided max_attempts in the client config
# overrides everything.
retries = config_kwargs.get('retries')
if retries is not None:
if 'total_max_attempts' in retries:
retries.pop('max_attempts', None)
return
if 'max_attempts' in retries:
value = retries.pop('max_attempts')
# client config max_attempts means total retries so we
# have to add one for 'total_max_attempts' to account
# for the initial request.
retries['total_max_attempts'] = value + 1
return
# Otherwise we'll check the config store which checks env vars,
# config files, etc. There is no default value for max_attempts
# so if this returns None, we don't set a default value here.
max_attempts = self._config_store.get_config_variable('max_attempts')
if max_attempts is not None:
if retries is None:
retries = {}
config_kwargs['retries'] = retries
retries['total_max_attempts'] = max_attempts
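# Worked examples of the normalization above (the config store's
# 'max_attempts', e.g. from the AWS_MAX_ATTEMPTS environment variable, already
# means total attempts, so no +1 is applied to it):
#
#     Config(retries={'max_attempts': 4})        -> retries={'total_max_attempts': 5}
#     Config(retries={'total_max_attempts': 5})  -> retries={'total_max_attempts': 5}
#     no client value, AWS_MAX_ATTEMPTS=3        -> retries={'total_max_attempts': 3}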
def _compute_retry_mode(self, config_kwargs):
retries = config_kwargs.get('retries')
if retries is None:
retries = {}
config_kwargs['retries'] = retries
elif 'mode' in retries:
# If there's a retry mode explicitly set in the client config
# that overrides everything.
return
retry_mode = self._config_store.get_config_variable('retry_mode')
if retry_mode is None:
retry_mode = 'legacy'
retries['mode'] = retry_mode
def _compute_connect_timeout(self, config_kwargs):
# Checking if connect_timeout is set on the client config.
# If it is not, we check the config_store in case a
# non legacy default mode has been configured.
connect_timeout = config_kwargs.get('connect_timeout')
if connect_timeout is not None:
return
connect_timeout = self._config_store.get_config_variable(
'connect_timeout'
)
if connect_timeout:
config_kwargs['connect_timeout'] = connect_timeout
def _compute_request_compression_config(self, config_kwargs):
min_size = config_kwargs.get('request_min_compression_size_bytes')
disabled = config_kwargs.get('disable_request_compression')
if min_size is None:
min_size = self._config_store.get_config_variable(
'request_min_compression_size_bytes'
)
# conversion func is skipped so input validation must be done here,
# regardless of whether the value comes from the config store or the
# config object
min_size = self._validate_min_compression_size(min_size)
config_kwargs['request_min_compression_size_bytes'] = min_size
if disabled is None:
disabled = self._config_store.get_config_variable(
'disable_request_compression'
)
else:
# if the user provided a value we must check if it's a boolean
disabled = ensure_boolean(disabled)
config_kwargs['disable_request_compression'] = disabled
def _validate_min_compression_size(self, min_size):
min_allowed_min_size = 1
max_allowed_min_size = 1048576
if min_size is not None:
error_msg_base = (
f'Invalid value "{min_size}" for '
'request_min_compression_size_bytes.'
)
try:
min_size = int(min_size)
except (ValueError, TypeError):
msg = (
f'{error_msg_base} Value must be an integer. '
f'Received {type(min_size)} instead.'
)
raise botocore.exceptions.InvalidConfigError(error_msg=msg)
if not min_allowed_min_size <= min_size <= max_allowed_min_size:
msg = (
f'{error_msg_base} Value must be between '
f'{min_allowed_min_size} and {max_allowed_min_size}.'
)
raise botocore.exceptions.InvalidConfigError(error_msg=msg)
return min_size
def _ensure_boolean(self, val):
if isinstance(val, bool):
return val
else:
return val.lower() == 'true'
def _build_endpoint_resolver(
self,
endpoints_ruleset_data,
partition_data,
client_config,
service_model,
endpoint_region_name,
region_name,
endpoint_url,
endpoint,
is_secure,
endpoint_bridge,
event_emitter,
):
if endpoints_ruleset_data is None:
return None
# The legacy EndpointResolver is global to the session, but
# EndpointRulesetResolver is service-specific. Builtins for
# EndpointRulesetResolver must not be derived from the legacy
# endpoint resolver's output, including final_args, s3_config,
# etc.
s3_config_raw = self.compute_s3_config(client_config) or {}
service_name_raw = service_model.endpoint_prefix
# Maintain complex logic for s3 and sts endpoints for backwards
# compatibility.
if service_name_raw in ['s3', 'sts'] or region_name is None:
eprv2_region_name = endpoint_region_name
else:
eprv2_region_name = region_name
resolver_builtins = self.compute_endpoint_resolver_builtin_defaults(
region_name=eprv2_region_name,
service_name=service_name_raw,
s3_config=s3_config_raw,
endpoint_bridge=endpoint_bridge,
client_endpoint_url=endpoint_url,
legacy_endpoint_url=endpoint.host,
)
# Client context params for s3 conflict with the available settings
# in the `s3` parameter on the `Config` object. If the same parameter
# is set in both places, the value in the `s3` parameter takes priority.
if client_config is not None:
client_context = client_config.client_context_params or {}
else:
client_context = {}
if self._is_s3_service(service_name_raw):
client_context.update(s3_config_raw)
sig_version = (
client_config.signature_version
if client_config is not None
else None
)
return EndpointRulesetResolver(
endpoint_ruleset_data=endpoints_ruleset_data,
partition_data=partition_data,
service_model=service_model,
builtins=resolver_builtins,
client_context=client_context,
event_emitter=event_emitter,
use_ssl=is_secure,
requested_auth_scheme=sig_version,
)
def compute_endpoint_resolver_builtin_defaults(
self,
region_name,
service_name,
s3_config,
endpoint_bridge,
client_endpoint_url,
legacy_endpoint_url,
):
# EndpointRulesetResolver rulesets may accept an "SDK::Endpoint" as
# input. If the endpoint_url argument of create_client() is set, it
# always takes priority.
if client_endpoint_url:
given_endpoint = client_endpoint_url
# If an endpoints.json data file other than the one bundled within
# the botocore/data directory is used, the output of legacy
# endpoint resolution is provided to EndpointRulesetResolver.
elif not endpoint_bridge.resolver_uses_builtin_data():
given_endpoint = legacy_endpoint_url
else:
given_endpoint = None
# The endpoint rulesets differ from legacy botocore behavior in whether
# forcing path style addressing in incompatible situations raises an
# exception or silently ignores the config setting. The
# AWS_S3_FORCE_PATH_STYLE parameter is adjusted both here and for each
# operation so that the ruleset behavior is backwards compatible.
if s3_config.get('use_accelerate_endpoint', False):
force_path_style = False
elif client_endpoint_url is not None and not is_s3_accelerate_url(
client_endpoint_url
):
force_path_style = s3_config.get('addressing_style') != 'virtual'
else:
force_path_style = s3_config.get('addressing_style') == 'path'
return {
EPRBuiltins.AWS_REGION: region_name,
EPRBuiltins.AWS_USE_FIPS: (
# SDK_ENDPOINT cannot be combined with AWS_USE_FIPS
given_endpoint is None
# use legacy resolver's _resolve_endpoint_variant_config_var()
# or default to False if it returns None
and endpoint_bridge._resolve_endpoint_variant_config_var(
'use_fips_endpoint'
)
or False
),
EPRBuiltins.AWS_USE_DUALSTACK: (
# SDK_ENDPOINT cannot be combined with AWS_USE_DUALSTACK
given_endpoint is None
# use legacy resolver's _resolve_use_dualstack_endpoint()
# or default to False if it returns None
and endpoint_bridge._resolve_use_dualstack_endpoint(
service_name
)
or False
),
EPRBuiltins.AWS_STS_USE_GLOBAL_ENDPOINT: (
self._should_set_global_sts_endpoint(
region_name=region_name,
endpoint_url=None,
endpoint_config=None,
)
),
EPRBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT: (
self._should_force_s3_global(region_name, s3_config)
),
EPRBuiltins.AWS_S3_ACCELERATE: s3_config.get(
'use_accelerate_endpoint', False
),
EPRBuiltins.AWS_S3_FORCE_PATH_STYLE: force_path_style,
EPRBuiltins.AWS_S3_USE_ARN_REGION: s3_config.get(
'use_arn_region', True
),
EPRBuiltins.AWS_S3CONTROL_USE_ARN_REGION: s3_config.get(
'use_arn_region', False
),
EPRBuiltins.AWS_S3_DISABLE_MRAP: s3_config.get(
's3_disable_multiregion_access_points', False
),
EPRBuiltins.SDK_ENDPOINT: given_endpoint,
}
def _compute_user_agent_appid_config(self, config_kwargs):
user_agent_appid = config_kwargs.get('user_agent_appid')
if user_agent_appid is None:
user_agent_appid = self._config_store.get_config_variable(
'user_agent_appid'
)
if (
user_agent_appid is not None
and len(user_agent_appid) > USERAGENT_APPID_MAXLEN
):
logger.warning(
'The configured value for user_agent_appid exceeds the '
f'maximum length of {USERAGENT_APPID_MAXLEN} characters.'
)
config_kwargs['user_agent_appid'] = user_agent_appid
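# Illustrative behavior of the check above: Config(user_agent_appid='my-app')
# is surfaced as an 'app/my-app' component of the User-Agent header. Values
# longer than USERAGENT_APPID_MAXLEN (50 characters) only trigger the warning;
# they are still passed through unmodified.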

File diff suppressed because it is too large.


@@ -0,0 +1,635 @@
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import functools
import logging
from collections.abc import Mapping
import urllib3.util
from urllib3.connection import HTTPConnection, VerifiedHTTPSConnection
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
import botocore.utils
from botocore.compat import (
HTTPHeaders,
HTTPResponse,
MutableMapping,
urlencode,
urlparse,
urlsplit,
urlunsplit,
)
from botocore.exceptions import UnseekableStreamError
logger = logging.getLogger(__name__)
class AWSHTTPResponse(HTTPResponse):
# The *args, **kwargs is used because the args are slightly
# different in py2.6 than in py2.7/py3.
def __init__(self, *args, **kwargs):
self._status_tuple = kwargs.pop('status_tuple')
HTTPResponse.__init__(self, *args, **kwargs)
def _read_status(self):
if self._status_tuple is not None:
status_tuple = self._status_tuple
self._status_tuple = None
return status_tuple
else:
return HTTPResponse._read_status(self)
class AWSConnection:
"""Mixin for HTTPConnection that supports Expect 100-continue.
This is meant to be mixed in with a subclass of httplib.HTTPConnection (though
technically we subclass from urllib3, which subclasses
httplib.HTTPConnection); we only override this class to support Expect
100-continue, which we need for S3. As far as I can tell, this is
general purpose enough to not be specific to S3, but I'm being
tentative and keeping it in botocore because I've only tested
this against AWS services.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._original_response_cls = self.response_class
# This variable is set when we receive an early response from the
# server. If this value is set to True, any calls to send() are noops.
# This value is reset to false every time _send_request is called.
# This is to workaround changes in urllib3 2.0 which uses separate
# send() calls in request() instead of delegating to endheaders(),
# which is where the body is sent in CPython's HTTPConnection.
self._response_received = False
self._expect_header_set = False
self._send_called = False
def close(self):
super().close()
# Reset all of our instance state we were tracking.
self._response_received = False
self._expect_header_set = False
self._send_called = False
self.response_class = self._original_response_cls
def request(self, method, url, body=None, headers=None, *args, **kwargs):
if headers is None:
headers = {}
self._response_received = False
if headers.get('Expect', b'') == b'100-continue':
self._expect_header_set = True
else:
self._expect_header_set = False
self.response_class = self._original_response_cls
rval = super().request(method, url, body, headers, *args, **kwargs)
self._expect_header_set = False
return rval
def _convert_to_bytes(self, mixed_buffer):
# Take a list of mixed str/bytes and convert it
# all into a single bytestring.
# Any str will be encoded as utf-8.
bytes_buffer = []
for chunk in mixed_buffer:
if isinstance(chunk, str):
bytes_buffer.append(chunk.encode('utf-8'))
else:
bytes_buffer.append(chunk)
msg = b"\r\n".join(bytes_buffer)
return msg
def _send_output(self, message_body=None, *args, **kwargs):
self._buffer.extend((b"", b""))
msg = self._convert_to_bytes(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
# between delayed ack and the Nagle algorithm.
if isinstance(message_body, bytes):
msg += message_body
message_body = None
self.send(msg)
if self._expect_header_set:
# This is our custom behavior. If the Expect header was
# set, it will trigger this custom behavior.
logger.debug("Waiting for 100 Continue response.")
# Wait for 1 second for the server to send a response.
if urllib3.util.wait_for_read(self.sock, 1):
self._handle_expect_response(message_body)
return
else:
# From the RFC:
# Because of the presence of older implementations, the
# protocol allows ambiguous situations in which a client may
# send "Expect: 100-continue" without receiving either a 417
# (Expectation Failed) status or a 100 (Continue) status.
# Therefore, when a client sends this header field to an origin
# server (possibly via a proxy) from which it has never seen a
# 100 (Continue) status, the client SHOULD NOT wait for an
# indefinite period before sending the request body.
logger.debug(
"No response seen from server, continuing to "
"send the response body."
)
if message_body is not None:
# message_body was not a string (i.e. it is a file), and
# we must run the risk of Nagle.
self.send(message_body)
def _consume_headers(self, fp):
# Most servers (including S3) will just return
# the CRLF after the 100 continue response. However,
# some servers (I've specifically seen this for squid when
# used as a straight HTTP proxy) will also inject a
# Connection: keep-alive header. To account for this
# we'll read until we read '\r\n', and ignore any headers
# that come immediately after the 100 continue response.
current = None
while current != b'\r\n':
current = fp.readline()
def _handle_expect_response(self, message_body):
# This is called when we sent the request headers containing
# an Expect: 100-continue header and received a response.
# We now need to figure out what to do.
fp = self.sock.makefile('rb', 0)
try:
maybe_status_line = fp.readline()
parts = maybe_status_line.split(None, 2)
if self._is_100_continue_status(maybe_status_line):
self._consume_headers(fp)
logger.debug(
"100 Continue response seen, now sending request body."
)
self._send_message_body(message_body)
elif len(parts) == 3 and parts[0].startswith(b'HTTP/'):
# From the RFC:
# Requirements for HTTP/1.1 origin servers:
#
# - Upon receiving a request which includes an Expect
# request-header field with the "100-continue"
# expectation, an origin server MUST either respond with
# 100 (Continue) status and continue to read from the
# input stream, or respond with a final status code.
#
# So if we don't get a 100 Continue response, then
# whatever the server has sent back is the final response
# and don't send the message_body.
logger.debug(
"Received a non 100 Continue response "
"from the server, NOT sending request body."
)
status_tuple = (
parts[0].decode('ascii'),
int(parts[1]),
parts[2].decode('ascii'),
)
response_class = functools.partial(
AWSHTTPResponse, status_tuple=status_tuple
)
self.response_class = response_class
self._response_received = True
finally:
fp.close()
def _send_message_body(self, message_body):
if message_body is not None:
self.send(message_body)
def send(self, str):
if self._response_received:
if not self._send_called:
# urllib3 2.0 chunks and calls send potentially
# thousands of times inside `request` unlike the
# standard library. Only log this once for sanity.
logger.debug(
"send() called, but response already received. "
"Not sending data."
)
self._send_called = True
return
return super().send(str)
def _is_100_continue_status(self, maybe_status_line):
parts = maybe_status_line.split(None, 2)
# Check for HTTP/<version> 100 Continue\r\n
return (
len(parts) >= 3
and parts[0].startswith(b'HTTP/')
and parts[1] == b'100'
)
class AWSHTTPConnection(AWSConnection, HTTPConnection):
"""An HTTPConnection that supports 100 Continue behavior."""
class AWSHTTPSConnection(AWSConnection, VerifiedHTTPSConnection):
"""An HTTPSConnection that supports 100 Continue behavior."""
class AWSHTTPConnectionPool(HTTPConnectionPool):
ConnectionCls = AWSHTTPConnection
class AWSHTTPSConnectionPool(HTTPSConnectionPool):
ConnectionCls = AWSHTTPSConnection
def prepare_request_dict(
request_dict, endpoint_url, context=None, user_agent=None
):
"""
This method prepares a request dict to be turned into an
AWSRequest object. It does this by adding the url and the
user agent to the request dict.
:type request_dict: dict
:param request_dict: The request dict (created from the
``serialize`` module).
:type user_agent: string
:param user_agent: The user agent to use for this request.
:type endpoint_url: string
:param endpoint_url: The full endpoint url, which contains at least
the scheme, the hostname, and optionally any path components.
"""
r = request_dict
if user_agent is not None:
headers = r['headers']
headers['User-Agent'] = user_agent
host_prefix = r.get('host_prefix')
url = _urljoin(endpoint_url, r['url_path'], host_prefix)
if r['query_string']:
# NOTE: This is to avoid a circular import with utils. This is being
# done to avoid moving classes to different modules so as not to cause
# breaking changes.
percent_encode_sequence = botocore.utils.percent_encode_sequence
encoded_query_string = percent_encode_sequence(r['query_string'])
if '?' not in url:
url += '?%s' % encoded_query_string
else:
url += '&%s' % encoded_query_string
r['url'] = url
r['context'] = context
if context is None:
r['context'] = {}
def create_request_object(request_dict):
"""
This method takes a request dict and creates an AWSRequest object
from it.
:type request_dict: dict
:param request_dict: The request dict (created from the
``prepare_request_dict`` method).
:rtype: ``botocore.awsrequest.AWSRequest``
:return: An AWSRequest object based on the request_dict.
"""
r = request_dict
request_object = AWSRequest(
method=r['method'],
url=r['url'],
data=r['body'],
headers=r['headers'],
auth_path=r.get('auth_path'),
)
request_object.context = r['context']
return request_object
def _urljoin(endpoint_url, url_path, host_prefix):
p = urlsplit(endpoint_url)
# <part> - <index>
# scheme - p[0]
# netloc - p[1]
# path - p[2]
# query - p[3]
# fragment - p[4]
if not url_path or url_path == '/':
# If there's no path component, ensure the URL ends with
# a '/' for backwards compatibility.
if not p[2]:
new_path = '/'
else:
new_path = p[2]
elif p[2].endswith('/') and url_path.startswith('/'):
new_path = p[2][:-1] + url_path
else:
new_path = p[2] + url_path
new_netloc = p[1]
if host_prefix is not None:
new_netloc = host_prefix + new_netloc
reconstructed = urlunsplit((p[0], new_netloc, new_path, p[3], p[4]))
return reconstructed
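# Illustrative walk-through of prepare_request_dict and _urljoin; the
# request_dict values are hypothetical but shaped like the output of the
# ``serialize`` module:
#
#     >>> request_dict = {
#     ...     'url_path': '/2015-03-31/functions',
#     ...     'query_string': {'MaxItems': '10'},
#     ...     'method': 'GET',
#     ...     'headers': {},
#     ...     'body': b'',
#     ...     'host_prefix': None,
#     ... }
#     >>> prepare_request_dict(
#     ...     request_dict,
#     ...     endpoint_url='https://lambda.us-east-1.amazonaws.com',
#     ...     user_agent='Botocore/1.34.39',
#     ... )
#     >>> request_dict['url']
#     'https://lambda.us-east-1.amazonaws.com/2015-03-31/functions?MaxItems=10'
#     >>> request_dict['headers']['User-Agent']
#     'Botocore/1.34.39'
#     >>> create_request_object(request_dict).method
#     'GET'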
class AWSRequestPreparer:
"""
This class performs preparation on AWSRequest objects similar to what
the PreparedRequest class does in the requests library. However, the logic
has been boiled down to meet the specific use cases in botocore. Of note
there are the following differences:
This class does not heavily prepare the URL. Requests performed many
validations and corrections to ensure the URL is properly formatted.
Botocore either performs these validations elsewhere or otherwise
consistently provides well formatted URLs.
This class does not heavily prepare the body. Body preparation is
simple and supports only the cases that we document: bytes and
file-like objects (to determine the content-length). It will also
prepare a body that is a dict into a url-encoded params string,
as some signers rely on this. Finally, this class does not
support multipart file uploads.
This class does not prepare the method, auth or cookies.
"""
def prepare(self, original):
method = original.method
url = self._prepare_url(original)
body = self._prepare_body(original)
headers = self._prepare_headers(original, body)
stream_output = original.stream_output
return AWSPreparedRequest(method, url, headers, body, stream_output)
def _prepare_url(self, original):
url = original.url
if original.params:
url_parts = urlparse(url)
delim = '&' if url_parts.query else '?'
if isinstance(original.params, Mapping):
params_to_encode = list(original.params.items())
else:
params_to_encode = original.params
params = urlencode(params_to_encode, doseq=True)
url = delim.join((url, params))
return url
def _prepare_headers(self, original, prepared_body=None):
headers = HeadersDict(original.headers.items())
# If the transfer encoding or content length is already set, use that
if 'Transfer-Encoding' in headers or 'Content-Length' in headers:
return headers
# Ensure we set the content length when it is expected
if original.method not in ('GET', 'HEAD', 'OPTIONS'):
length = self._determine_content_length(prepared_body)
if length is not None:
headers['Content-Length'] = str(length)
else:
# Failed to determine content length, using chunked
# NOTE: This shouldn't ever happen in practice
body_type = type(prepared_body)
logger.debug('Failed to determine length of %s', body_type)
headers['Transfer-Encoding'] = 'chunked'
return headers
def _to_utf8(self, item):
key, value = item
if isinstance(key, str):
key = key.encode('utf-8')
if isinstance(value, str):
value = value.encode('utf-8')
return key, value
def _prepare_body(self, original):
"""Prepares the given HTTP body data."""
body = original.data
if body == b'':
body = None
if isinstance(body, dict):
params = [self._to_utf8(item) for item in body.items()]
body = urlencode(params, doseq=True)
return body
def _determine_content_length(self, body):
return botocore.utils.determine_content_length(body)
class AWSRequest:
"""Represents the elements of an HTTP request.
This class was originally inspired by requests.models.Request, but has been
boiled down to meet the specific use cases in botocore. That being said, this
class (even in requests) is effectively a named tuple.
"""
_REQUEST_PREPARER_CLS = AWSRequestPreparer
def __init__(
self,
method=None,
url=None,
headers=None,
data=None,
params=None,
auth_path=None,
stream_output=False,
):
self._request_preparer = self._REQUEST_PREPARER_CLS()
# Default empty dicts for dict params.
params = {} if params is None else params
self.method = method
self.url = url
self.headers = HTTPHeaders()
self.data = data
self.params = params
self.auth_path = auth_path
self.stream_output = stream_output
if headers is not None:
for key, value in headers.items():
self.headers[key] = value
# This is a dictionary to hold information that is used when
# processing the request. What is inside of ``context`` is open-ended.
# For example, it may have a timestamp key that is used for holding
# what the timestamp is when signing the request. Note that none
# of the information that is inside of ``context`` is directly
# sent over the wire; the information is only used to assist in
# creating what is sent over the wire.
self.context = {}
def prepare(self):
"""Constructs a :class:`AWSPreparedRequest <AWSPreparedRequest>`."""
return self._request_preparer.prepare(self)
@property
def body(self):
body = self.prepare().body
if isinstance(body, str):
body = body.encode('utf-8')
return body
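# Small illustrative example of AWSRequest preparation (values hypothetical):
#
#     >>> request = AWSRequest(
#     ...     method='PUT',
#     ...     url='https://example.amazonaws.com/my-key',
#     ...     data=b'hello',
#     ...     headers={'X-Amz-Meta-Purpose': 'demo'},
#     ... )
#     >>> prepared = request.prepare()
#     >>> prepared.headers['Content-Length']
#     '5'
#     >>> prepared.body
#     b'hello'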
class AWSPreparedRequest:
"""A data class representing a finalized request to be sent over the wire.
Requests at this stage should be treated as final, and the properties of
the request should not be modified.
:ivar method: The HTTP Method
:ivar url: The full url
:ivar headers: The HTTP headers to send.
:ivar body: The HTTP body.
:ivar stream_output: If the response for this request should be streamed.
"""
def __init__(self, method, url, headers, body, stream_output):
self.method = method
self.url = url
self.headers = headers
self.body = body
self.stream_output = stream_output
def __repr__(self):
fmt = (
'<AWSPreparedRequest stream_output=%s, method=%s, url=%s, '
'headers=%s>'
)
return fmt % (self.stream_output, self.method, self.url, self.headers)
def reset_stream(self):
"""Resets the streaming body to it's initial position.
If the request contains a streaming body (a streamable file-like object)
seek to the object's initial position to ensure the entire contents of
the object is sent. This is a no-op for static bytes-like body types.
"""
# Trying to reset a stream when there is no stream will
# just immediately return. It's not an error, it will produce
# the same result as if we had actually reset the stream (we'll send
# the entire body contents again if we need to).
# Same case if the body is a string/bytes/bytearray type.
non_seekable_types = (bytes, str, bytearray)
if self.body is None or isinstance(self.body, non_seekable_types):
return
try:
logger.debug("Rewinding stream: %s", self.body)
self.body.seek(0)
except Exception as e:
logger.debug("Unable to rewind stream: %s", e)
raise UnseekableStreamError(stream_object=self.body)
class AWSResponse:
"""A data class representing an HTTP response.
This class was originally inspired by requests.models.Response, but has
been boiled down to meet the specific use cases in botocore. This has
effectively been reduced to a named tuple.
:ivar url: The full url.
:ivar status_code: The status code of the HTTP response.
:ivar headers: The HTTP headers received.
:ivar body: The HTTP response body.
"""
def __init__(self, url, status_code, headers, raw):
self.url = url
self.status_code = status_code
self.headers = HeadersDict(headers)
self.raw = raw
self._content = None
@property
def content(self):
"""Content of the response as bytes."""
if self._content is None:
# Read the contents.
# NOTE: requests would attempt to call stream and fall back
# to a custom generator that would call read in a loop, but
# we don't rely on this behavior
self._content = b''.join(self.raw.stream()) or b''
return self._content
@property
def text(self):
"""Content of the response as a proper text type.
Uses the encoding type provided in the response headers to decode the
response content into a proper text type. If the encoding is not
present in the headers, UTF-8 is used as a default.
"""
encoding = botocore.utils.get_encoding_from_headers(self.headers)
if encoding:
return self.content.decode(encoding)
else:
return self.content.decode('utf-8')
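# Illustrative example; the fake ``raw`` object stands in for the urllib3
# response botocore normally passes in:
#
#     >>> class _FakeRaw:
#     ...     def stream(self):
#     ...         yield b'{"status": "ok"}'
#     >>> response = AWSResponse(
#     ...     url='https://example.amazonaws.com/',
#     ...     status_code=200,
#     ...     headers={'Content-Type': 'application/json; charset=utf-8'},
#     ...     raw=_FakeRaw(),
#     ... )
#     >>> response.text
#     '{"status": "ok"}'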
class _HeaderKey:
def __init__(self, key):
self._key = key
self._lower = key.lower()
def __hash__(self):
return hash(self._lower)
def __eq__(self, other):
return isinstance(other, _HeaderKey) and self._lower == other._lower
def __str__(self):
return self._key
def __repr__(self):
return repr(self._key)
class HeadersDict(MutableMapping):
"""A case-insenseitive dictionary to represent HTTP headers."""
def __init__(self, *args, **kwargs):
self._dict = {}
self.update(*args, **kwargs)
def __setitem__(self, key, value):
self._dict[_HeaderKey(key)] = value
def __getitem__(self, key):
return self._dict[_HeaderKey(key)]
def __delitem__(self, key):
del self._dict[_HeaderKey(key)]
def __iter__(self):
return (str(key) for key in self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return repr(self._dict)
def copy(self):
return HeadersDict(self.items())
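# Quick illustration of the case-insensitive behavior (keys keep their
# original spelling when iterated):
#
#     >>> headers = HeadersDict({'Content-Type': 'application/json'})
#     >>> headers['content-type']
#     'application/json'
#     >>> 'CONTENT-TYPE' in headers
#     True
#     >>> list(headers)
#     ['Content-Type']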

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,347 @@
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import datetime
import sys
import inspect
import warnings
import hashlib
from http.client import HTTPMessage
import logging
import shlex
import re
import os
from collections import OrderedDict
from collections.abc import MutableMapping
from math import floor
from botocore.vendored import six
from botocore.exceptions import MD5UnavailableError
from dateutil.tz import tzlocal
from urllib3 import exceptions
logger = logging.getLogger(__name__)
class HTTPHeaders(HTTPMessage):
pass
from urllib.parse import (
quote,
urlencode,
unquote,
unquote_plus,
urlparse,
urlsplit,
urlunsplit,
urljoin,
parse_qsl,
parse_qs,
)
from http.client import HTTPResponse
from io import IOBase as _IOBase
from base64 import encodebytes
from email.utils import formatdate
from itertools import zip_longest
file_type = _IOBase
zip = zip
# In python3, unquote takes a str() object, url decodes it,
# then takes the bytestring and decodes it to utf-8.
unquote_str = unquote_plus
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp.raw._sock.settimeout(timeout)
def accepts_kwargs(func):
# In python3.4.1, there are backwards incompatible
# changes when using getargspec with functools.partials.
return inspect.getfullargspec(func)[2]
def ensure_unicode(s, encoding=None, errors=None):
# NOOP in Python 3, because every string is already unicode
return s
def ensure_bytes(s, encoding='utf-8', errors='strict'):
if isinstance(s, str):
return s.encode(encoding, errors)
if isinstance(s, bytes):
return s
raise ValueError(f"Expected str or bytes, received {type(s)}.")
try:
import xml.etree.cElementTree as ETree
except ImportError:
# cElementTree no longer exists as of Python 3.9
import xml.etree.ElementTree as ETree
XMLParseError = ETree.ParseError
import json
def filter_ssl_warnings():
# Ignore warnings related to SNI as it is not being used in validations.
warnings.filterwarnings(
'ignore',
message="A true SSLContext object is not available.*",
category=exceptions.InsecurePlatformWarning,
module=r".*urllib3\.util\.ssl_",
)
@classmethod
def from_dict(cls, d):
new_instance = cls()
for key, value in d.items():
new_instance[key] = value
return new_instance
@classmethod
def from_pairs(cls, pairs):
new_instance = cls()
for key, value in pairs:
new_instance[key] = value
return new_instance
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
def copy_kwargs(kwargs):
"""
This used to be a compat shim for 2.6 but is now just an alias.
"""
copy_kwargs = copy.copy(kwargs)
return copy_kwargs
def total_seconds(delta):
"""
Returns the total seconds in a ``datetime.timedelta``.
This used to be a compat shim for 2.6 but is now just an alias.
:param delta: The timedelta object
:type delta: ``datetime.timedelta``
"""
return delta.total_seconds()
# Checks to see if md5 is available on this system. A given system might not
# have access to it for various reasons, such as FIPS mode being enabled.
try:
hashlib.md5()
MD5_AVAILABLE = True
except ValueError:
MD5_AVAILABLE = False
def get_md5(*args, **kwargs):
"""
Attempts to get an md5 hashing object.
:param args: Args to pass to the MD5 constructor
:param kwargs: Key word arguments to pass to the MD5 constructor
:return: An MD5 hashing object if available. If it is unavailable,
an MD5UnavailableError is raised.
"""
if MD5_AVAILABLE:
return hashlib.md5(*args, **kwargs)
else:
raise MD5UnavailableError()
def compat_shell_split(s, platform=None):
if platform is None:
platform = sys.platform
if platform == "win32":
return _windows_shell_split(s)
else:
return shlex.split(s)
def _windows_shell_split(s):
"""Splits up a windows command as the built-in command parser would.
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime (which is what python does
when you call popen) the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: The command string to split up into parts.
:return: A list of command components.
"""
if not s:
return []
components = []
buff = []
is_quoted = False
num_backslashes = 0
for character in s:
if character == '\\':
# We can't simply append backslashes because we don't know if
# they are being used as escape characters or not. Instead we
# keep track of how many we've encountered and handle them when
# we encounter a different character.
num_backslashes += 1
elif character == '"':
if num_backslashes > 0:
# The backslashes are in a chain leading up to a double
# quote, so they are escaping each other.
buff.append('\\' * int(floor(num_backslashes / 2)))
remainder = num_backslashes % 2
num_backslashes = 0
if remainder == 1:
# The number of backslashes is uneven, so they are also
# escaping the double quote, so it needs to be added to
# the current component buffer.
buff.append('"')
continue
# We've encountered a double quote that is not escaped,
# so we toggle is_quoted.
is_quoted = not is_quoted
# If there are quotes, then we may want an empty string. To be
# safe, we add an empty string to the buffer so that we make
# sure it sticks around if there's nothing else between quotes.
# If there is other stuff between quotes, the empty string will
# disappear during the joining process.
buff.append('')
elif character in [' ', '\t'] and not is_quoted:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
# Excess whitespace is ignored, so only add to the components list
# if there is anything in the buffer.
if buff:
components.append(''.join(buff))
buff = []
else:
# Since the backslashes aren't leading up to a quote, we put in
# the exact number of backslashes.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
num_backslashes = 0
buff.append(character)
# Quotes must be terminated.
if is_quoted:
raise ValueError(f"No closing quotation in string: {s}")
# There may be some leftover backslashes, so we need to add them in.
# There's no quote so we add the exact number.
if num_backslashes > 0:
buff.append('\\' * num_backslashes)
# Add the final component in if there is anything in the buffer.
if buff:
components.append(''.join(buff))
return components
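# Illustrative splits under the Windows rules described above (outputs worked
# out by hand from those rules):
#
#     >>> compat_shell_split(
#     ...     'aws s3 cp "C:\\Program Files\\app\\data.txt" s3://bucket/',
#     ...     platform='win32',
#     ... )
#     ['aws', 's3', 'cp', 'C:\\Program Files\\app\\data.txt', 's3://bucket/']
#     >>> compat_shell_split('echo "a \\" b"', platform='win32')
#     ['echo', 'a " b']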
def get_tzinfo_options():
# Due to dateutil/dateutil#197, Windows may fail to parse times in the past
# with the system clock. We can alternatively fall back to tzwinlocal when
# this happens, which will get time info from the Windows registry.
if sys.platform == 'win32':
from dateutil.tz import tzwinlocal
return (tzlocal, tzwinlocal)
else:
return (tzlocal,)
# Detect if CRT is available for use
try:
import awscrt.auth
# Allow user opt-out if needed
disabled = os.environ.get('BOTO_DISABLE_CRT', "false")
HAS_CRT = not disabled.lower() == 'true'
except ImportError:
HAS_CRT = False
########################################################
# urllib3 compat backports #
########################################################
# Vendoring IPv6 validation regex patterns from urllib3
# https://github.com/urllib3/urllib3/blob/7e856c0/src/urllib3/util/url.py
IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
IPV4_RE = re.compile("^" + IPV4_PAT + "$")
HEX_PAT = "[0-9A-Fa-f]{1,4}"
LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
_variations = [
# 6( h16 ":" ) ls32
"(?:%(hex)s:){6}%(ls32)s",
# "::" 5( h16 ":" ) ls32
"::(?:%(hex)s:){5}%(ls32)s",
# [ h16 ] "::" 4( h16 ":" ) ls32
"(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
# [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
"(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
# [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
"(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
# [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
"(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
# [ *4( h16 ":" ) h16 ] "::" ls32
"(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
# [ *5( h16 ":" ) h16 ] "::" h16
"(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
# [ *6( h16 ":" ) h16 ] "::"
"(?:(?:%(hex)s:){0,6}%(hex)s)?::",
]
UNRESERVED_PAT = (
r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
)
IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
# These are the characters that are stripped by post-bpo-43882 urlparse().
UNSAFE_URL_CHARS = frozenset('\t\r\n')
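# Illustrative sketch: how the vendored patterns above can be applied. The
# helper name ``_is_bracketed_ipv6_host`` is hypothetical; it simply mirrors
# urllib3's host validation.
def _is_bracketed_ipv6_host(host):
    return IPV6_ADDRZ_RE.match(host) is not None


assert _is_bracketed_ipv6_host('[::1]')
assert _is_bracketed_ipv6_host('[fe80::1%25eth0]')
assert not _is_bracketed_ipv6_host('example.com')
assert any(ch in UNSAFE_URL_CHARS for ch in 'https://bad\r\nexample.com')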
# Detect if gzip is available for use
try:
import gzip
HAS_GZIP = True
except ImportError:
HAS_GZIP = False

View File

@@ -0,0 +1,126 @@
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
NOTE: All functions in this module are considered private and are
subject to abrupt breaking changes. Please do not use them directly.
"""
import io
import logging
from gzip import GzipFile
from gzip import compress as gzip_compress
from botocore.compat import urlencode
from botocore.utils import determine_content_length
logger = logging.getLogger(__name__)
def maybe_compress_request(config, request_dict, operation_model):
"""Attempt to compress the request body using the modeled encodings."""
if _should_compress_request(config, request_dict, operation_model):
for encoding in operation_model.request_compression['encodings']:
encoder = COMPRESSION_MAPPING.get(encoding)
if encoder is not None:
logger.debug('Compressing request with %s encoding.', encoding)
request_dict['body'] = encoder(request_dict['body'])
_set_compression_header(request_dict['headers'], encoding)
return
else:
logger.debug('Unsupported compression encoding: %s', encoding)
def _should_compress_request(config, request_dict, operation_model):
if (
config.disable_request_compression is not True
and config.signature_version != 'v2'
and operation_model.request_compression is not None
):
if not _is_compressible_type(request_dict):
body_type = type(request_dict['body'])
log_msg = 'Body type %s does not support compression.'
logger.debug(log_msg, body_type)
return False
if operation_model.has_streaming_input:
streaming_input = operation_model.get_streaming_input()
streaming_metadata = streaming_input.metadata
return 'requiresLength' not in streaming_metadata
body_size = _get_body_size(request_dict['body'])
min_size = config.request_min_compression_size_bytes
return min_size <= body_size
return False
def _is_compressible_type(request_dict):
body = request_dict['body']
# Coerce dict to a format compatible with compression.
if isinstance(body, dict):
body = urlencode(body, doseq=True, encoding='utf-8').encode('utf-8')
request_dict['body'] = body
is_supported_type = isinstance(body, (str, bytes, bytearray))
return is_supported_type or hasattr(body, 'read')
def _get_body_size(body):
size = determine_content_length(body)
if size is None:
logger.debug(
'Unable to get length of the request body: %s. '
'Skipping compression.',
body,
)
size = 0
return size
def _gzip_compress_body(body):
if isinstance(body, str):
return gzip_compress(body.encode('utf-8'))
elif isinstance(body, (bytes, bytearray)):
return gzip_compress(body)
elif hasattr(body, 'read'):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
current_position = body.tell()
compressed_obj = _gzip_compress_fileobj(body)
body.seek(current_position)
return compressed_obj
return _gzip_compress_fileobj(body)
def _gzip_compress_fileobj(body):
compressed_obj = io.BytesIO()
with GzipFile(fileobj=compressed_obj, mode='wb') as gz:
while True:
chunk = body.read(8192)
if not chunk:
break
if isinstance(chunk, str):
chunk = chunk.encode('utf-8')
gz.write(chunk)
compressed_obj.seek(0)
return compressed_obj
def _set_compression_header(headers, encoding):
ce_header = headers.get('Content-Encoding')
if ce_header is None:
headers['Content-Encoding'] = encoding
else:
headers['Content-Encoding'] = f'{ce_header},{encoding}'
COMPRESSION_MAPPING = {'gzip': _gzip_compress_body}
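# Illustrative sketch: a rough end-to-end use of maybe_compress_request()
# with stand-in objects in place of a real Config and OperationModel (both
# stubs below are hypothetical; only the attributes read above are set).
if __name__ == '__main__':
    from types import SimpleNamespace

    config = SimpleNamespace(
        disable_request_compression=False,
        signature_version='v4',
        request_min_compression_size_bytes=10240,
    )
    operation_model = SimpleNamespace(
        request_compression={'encodings': ['gzip']},
        has_streaming_input=False,
    )
    request_dict = {'body': b'x' * 20000, 'headers': {}}
    maybe_compress_request(config, request_dict, operation_model)
    # Expect {'Content-Encoding': 'gzip'} and a gzip-compressed body.
    print(request_dict['headers'])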

View File

@@ -0,0 +1,376 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
from botocore.compat import OrderedDict
from botocore.endpoint import DEFAULT_TIMEOUT, MAX_POOL_CONNECTIONS
from botocore.exceptions import (
InvalidMaxRetryAttemptsError,
InvalidRetryConfigurationError,
InvalidRetryModeError,
InvalidS3AddressingStyleError,
)
class Config:
"""Advanced configuration for Botocore clients.
:type region_name: str
:param region_name: The region to use in instantiating the client
:type signature_version: str
:param signature_version: The signature version when signing requests.
:type user_agent: str
:param user_agent: The value to use in the User-Agent header.
:type user_agent_extra: str
:param user_agent_extra: The value to append to the current User-Agent
header value.
:type user_agent_appid: str
:param user_agent_appid: A value that gets included in the User-Agent
string in the format "app/<user_agent_appid>". Allowed characters are
ASCII alphanumerics and ``!$%&'*+-.^_`|~``. All other characters will
be replaced by a ``-``.
:type connect_timeout: float or int
:param connect_timeout: The time in seconds till a timeout exception is
thrown when attempting to make a connection. The default is 60
seconds.
:type read_timeout: float or int
:param read_timeout: The time in seconds till a timeout exception is
thrown when attempting to read from a connection. The default is
60 seconds.
:type parameter_validation: bool
:param parameter_validation: Whether parameter validation should occur
when serializing requests. The default is True. You can disable
parameter validation for performance reasons. Otherwise, it's
recommended to leave parameter validation enabled.
:type max_pool_connections: int
:param max_pool_connections: The maximum number of connections to
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type proxies: dict
:param proxies: A dictionary of proxy servers to use by protocol or
endpoint, e.g.:
``{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}``.
The proxies are used on each request.
:type proxies_config: dict
:param proxies_config: A dictionary of additional proxy configurations.
Valid keys are:
* ``proxy_ca_bundle`` -- The path to a custom certificate bundle to use
when establishing SSL/TLS connections with proxy.
* ``proxy_client_cert`` -- The path to a certificate for proxy
TLS client authentication.
When a string is provided it is treated as a path to a proxy client
certificate. When a two element tuple is provided, it will be
interpreted as the path to the client certificate, and the path
to the certificate key.
* ``proxy_use_forwarding_for_https`` -- For HTTPS proxies,
forward your requests to HTTPS destinations with an absolute
URI. We strongly recommend you only use this option with
trusted or corporate proxies. Value must be boolean.
:type s3: dict
:param s3: A dictionary of S3 specific configurations.
Valid keys are:
* ``use_accelerate_endpoint`` -- Refers to whether to use the S3
Accelerate endpoint. The value must be a boolean. If True, the
client will use the S3 Accelerate endpoint. If the S3 Accelerate
endpoint is being used then the addressing style will always
be virtual.
* ``payload_signing_enabled`` -- Refers to whether or not to SHA256
sign sigv4 payloads. By default, this is disabled for streaming
uploads (UploadPart and PutObject).
* ``addressing_style`` -- Refers to the style in which to address
s3 endpoints. Values must be a string that equals one of:
* ``auto`` -- Addressing style is chosen for the user. Depending
on the configuration of client, the endpoint may be addressed in
the virtual or the path style. Note that this is the default
behavior if no style is specified.
* ``virtual`` -- Addressing style is always virtual. The name of the
bucket must be DNS compatible or an exception will be thrown.
Endpoints will be addressed as such: ``mybucket.s3.amazonaws.com``
* ``path`` -- Addressing style is always by path. Endpoints will be
addressed as such: ``s3.amazonaws.com/mybucket``
* ``us_east_1_regional_endpoint`` -- Refers to what S3 endpoint to use
when the region is configured to be us-east-1. Values must be a
string that equals:
* ``regional`` -- Use the us-east-1.amazonaws.com endpoint if the
client is configured to use the us-east-1 region.
* ``legacy`` -- Use the s3.amazonaws.com endpoint if the client is
configured to use the us-east-1 region. This is the default if
the configuration option is not specified.
:type retries: dict
:param retries: A dictionary for configuration related to retry behavior.
Valid keys are:
* ``total_max_attempts`` -- An integer representing the maximum number of
total attempts that will be made on a single request. This includes
the initial request, so a value of 1 indicates that no requests
will be retried. If ``total_max_attempts`` and ``max_attempts``
are both provided, ``total_max_attempts`` takes precedence.
``total_max_attempts`` is preferred over ``max_attempts`` because
it maps to the ``AWS_MAX_ATTEMPTS`` environment variable and
the ``max_attempts`` config file value.
* ``max_attempts`` -- An integer representing the maximum number of
retry attempts that will be made on a single request. For
example, setting this value to 2 will result in the request
being retried at most two times after the initial request. Setting
this value to 0 will result in no retries ever being attempted after
the initial request. If not provided, the number of retries will
default to the value specified in the service model, which is
typically four retries.
* ``mode`` -- A string representing the type of retry mode botocore
should use. Valid values are:
* ``legacy`` - The pre-existing retry behavior.
* ``standard`` - The standardized set of retry rules. This will also
default to 3 max attempts unless overridden.
* ``adaptive`` - Retries with additional client side throttling.
:type client_cert: str, (str, str)
:param client_cert: The path to a certificate for TLS client authentication.
When a string is provided it is treated as a path to a client
certificate to be used when creating a TLS connection.
If a client key is to be provided alongside the client certificate the
client_cert should be set to a tuple of length two where the first
element is the path to the client certificate and the second element is
the path to the certificate key.
:type inject_host_prefix: bool
:param inject_host_prefix: Whether host prefix injection should occur.
Defaults to True.
Setting this to False disables the injection of operation parameters
into the prefix of the hostname. This is useful for clients providing
custom endpoints that should not have their host prefix modified.
:type use_dualstack_endpoint: bool
:param use_dualstack_endpoint: Setting to True enables dualstack
endpoint resolution.
Defaults to None.
:type use_fips_endpoint: bool
:param use_fips_endpoint: Setting to True enables fips
endpoint resolution.
Defaults to None.
:type ignore_configured_endpoint_urls: bool
:param ignore_configured_endpoint_urls: Setting to True disables use
of endpoint URLs provided via environment variables and
the shared configuration file.
Defaults to None.
:type tcp_keepalive: bool
:param tcp_keepalive: Enables the TCP Keep-Alive socket option used when
creating new connections if set to True.
Defaults to False.
:type request_min_compression_size_bytes: int
:param request_min_compression_size_bytes: The minimum size in bytes that a
request body should be to trigger compression. All requests with
streaming input that don't contain the ``requiresLength`` trait will be
compressed regardless of this setting.
Defaults to None.
:type disable_request_compression: bool
:param disable_request_compression: Disables request body compression if
set to True.
Defaults to None.
:type client_context_params: dict
:param client_context_params: A dictionary of parameters specific to
individual services. If available, valid parameters can be found in
the ``Client Context Parameters`` section of the service client's
documentation. Invalid parameters or ones that are not used by the
specified service will be ignored.
Defaults to None.
"""
OPTION_DEFAULTS = OrderedDict(
[
('region_name', None),
('signature_version', None),
('user_agent', None),
('user_agent_extra', None),
('user_agent_appid', None),
('connect_timeout', DEFAULT_TIMEOUT),
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('proxies_config', None),
('s3', None),
('retries', None),
('client_cert', None),
('inject_host_prefix', True),
('endpoint_discovery_enabled', None),
('use_dualstack_endpoint', None),
('use_fips_endpoint', None),
('ignore_configured_endpoint_urls', None),
('defaults_mode', None),
('tcp_keepalive', None),
('request_min_compression_size_bytes', None),
('disable_request_compression', None),
('client_context_params', None),
]
)
NON_LEGACY_OPTION_DEFAULTS = {
'connect_timeout': None,
}
def __init__(self, *args, **kwargs):
self._user_provided_options = self._record_user_provided_options(
args, kwargs
)
# Merge the user_provided options onto the default options
config_vars = copy.copy(self.OPTION_DEFAULTS)
defaults_mode = self._user_provided_options.get(
'defaults_mode', 'legacy'
)
if defaults_mode != 'legacy':
config_vars.update(self.NON_LEGACY_OPTION_DEFAULTS)
config_vars.update(self._user_provided_options)
# Set the attributes based on the config_vars
for key, value in config_vars.items():
setattr(self, key, value)
# Validate the s3 options
self._validate_s3_configuration(self.s3)
self._validate_retry_configuration(self.retries)
def _record_user_provided_options(self, args, kwargs):
option_order = list(self.OPTION_DEFAULTS)
user_provided_options = {}
# Iterate through the kwargs passed through to the constructor and
# map valid keys to the dictionary
for key, value in kwargs.items():
if key in self.OPTION_DEFAULTS:
user_provided_options[key] = value
# The key must exist in the available options
else:
raise TypeError(f"Got unexpected keyword argument '{key}'")
# The number of positional args should not exceed the number of
# allowed options.
if len(args) > len(option_order):
raise TypeError(
f"Takes at most {len(option_order)} arguments ({len(args)} given)"
)
# Iterate through the args passed through to the constructor and map
# them to appropriate keys.
for i, arg in enumerate(args):
# If a kwarg was specified for the arg, then error out
if option_order[i] in user_provided_options:
raise TypeError(
f"Got multiple values for keyword argument '{option_order[i]}'"
)
user_provided_options[option_order[i]] = arg
return user_provided_options
def _validate_s3_configuration(self, s3):
if s3 is not None:
addressing_style = s3.get('addressing_style')
if addressing_style not in ['virtual', 'auto', 'path', None]:
raise InvalidS3AddressingStyleError(
s3_addressing_style=addressing_style
)
def _validate_retry_configuration(self, retries):
valid_options = ('max_attempts', 'mode', 'total_max_attempts')
valid_modes = ('legacy', 'standard', 'adaptive')
if retries is not None:
for key, value in retries.items():
if key not in valid_options:
raise InvalidRetryConfigurationError(
retry_config_option=key,
valid_options=valid_options,
)
if key == 'max_attempts' and value < 0:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=0,
)
if key == 'total_max_attempts' and value < 1:
raise InvalidMaxRetryAttemptsError(
provided_max_attempts=value,
min_value=1,
)
if key == 'mode' and value not in valid_modes:
raise InvalidRetryModeError(
provided_retry_mode=value,
valid_modes=valid_modes,
)
def merge(self, other_config):
"""Merges the config object with another config object
This will merge in all non-default values from the provided config
and return a new config object
:type other_config: botocore.config.Config
:param other_config: Another config object to merge with. The values
in the provided config object will take precedence in the merging
:returns: A config object built from the merged values of both
config objects.
"""
# Make a copy of the current attributes in the config object.
config_options = copy.copy(self._user_provided_options)
# Merge in the user provided options from the other config
config_options.update(other_config._user_provided_options)
# Return a new config object with the merged properties.
return Config(**config_options)
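# Illustrative sketch: constructing and merging Config objects. The values
# below are arbitrary examples.
if __name__ == '__main__':
    base = Config(
        region_name='us-west-2',
        retries={'max_attempts': 4, 'mode': 'standard'},
        s3={'addressing_style': 'virtual'},
    )
    override = Config(connect_timeout=5, read_timeout=30)
    merged = base.merge(override)
    # Non-default values from both objects survive the merge.
    print(merged.region_name, merged.connect_timeout, merged.retries)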

View File

@@ -0,0 +1,287 @@
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import configparser
import copy
import os
import shlex
import sys
import botocore.exceptions
def multi_file_load_config(*filenames):
"""Load and combine multiple INI configs with profiles.
This function will take a list of filenames and return
a single dictionary that represents the merging of the loaded
config files.
If any of the provided filenames does not exist, then that file
is ignored. It is therefore ok to provide a list of filenames,
some of which may not exist.
Configuration files are **not** deep merged, only the top level
keys are merged. The filenames should be passed in order of
precedence. The first config file has precedence over the
second config file, which has precedence over the third config file,
etc. The only exception to this is that the "profiles" key is
merged to combine profiles from multiple config files into a
single profiles mapping. However, if a profile is defined in
multiple config files, then the config file with the highest
precedence is used. Profile values themselves are not merged.
For example::
FileA              FileB                FileC
[foo]              [foo]                [bar]
a=1                a=2                  a=3
b=2
[bar]              [baz]                [profile a]
a=2                a=3                  region=e
[profile a]        [profile b]          [profile c]
region=c           region=d             region=f
The final result of ``multi_file_load_config(FileA, FileB, FileC)``
would be::
{"foo": {"a": 1}, "bar": {"a": 2}, "baz": {"a": 3},
"profiles": {"a": {"region": "c"}}, {"b": {"region": d"}},
{"c": {"region": "f"}}}
Note that the "foo" key comes from A, even though it's defined in both
FileA and FileB. Because "foo" was defined in FileA first, then the values
for "foo" from FileA are used and the values for "foo" from FileB are
ignored. Also note where the profiles originate from. Profile "a"
comes from FileA, profile "b" comes from FileB, and profile "c" comes
from FileC.
"""
configs = []
profiles = []
for filename in filenames:
try:
loaded = load_config(filename)
except botocore.exceptions.ConfigNotFound:
continue
profiles.append(loaded.pop('profiles'))
configs.append(loaded)
merged_config = _merge_list_of_dicts(configs)
merged_profiles = _merge_list_of_dicts(profiles)
merged_config['profiles'] = merged_profiles
return merged_config
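# Illustrative sketch: missing files are skipped, and the first file passed
# wins on conflicting top-level keys, as described in the docstring above.
# The paths are examples only.
if __name__ == '__main__':
    merged = multi_file_load_config(
        '~/.aws/config', '/tmp/does-not-exist.ini'
    )
    print(sorted(merged['profiles']))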
def _merge_list_of_dicts(list_of_dicts):
merged_dicts = {}
for single_dict in list_of_dicts:
for key, value in single_dict.items():
if key not in merged_dicts:
merged_dicts[key] = value
return merged_dicts
def load_config(config_filename):
"""Parse a INI config with profiles.
This will parse an INI config file and map top level profiles
into a top level "profiles" key.
If you want to parse an INI file and map all section names to
top level keys, use ``raw_config_parse`` instead.
"""
parsed = raw_config_parse(config_filename)
return build_profile_map(parsed)
def raw_config_parse(config_filename, parse_subsections=True):
"""Returns the parsed INI config contents.
Each section name is a top level key.
:param config_filename: The name of the INI file to parse
:param parse_subsections: If True, parse indented blocks as
subsections that represent their own configuration dictionary.
For example, if the config file had the contents::
s3 =
    signature_version = s3v4
    addressing_style = path
The resulting ``raw_config_parse`` would be::
{'s3': {'signature_version': 's3v4', 'addressing_style': 'path'}}
If False, do not try to parse subsections and return the indented
block as its literal value::
{'s3': '\nsignature_version = s3v4\naddressing_style = path'}
:returns: A dict with keys for each profile found in the config
file and the value of each key being a dict containing name
value pairs found in that profile.
:raises: ConfigNotFound, ConfigParseError
"""
config = {}
path = config_filename
if path is not None:
path = os.path.expandvars(path)
path = os.path.expanduser(path)
if not os.path.isfile(path):
raise botocore.exceptions.ConfigNotFound(path=_unicode_path(path))
cp = configparser.RawConfigParser()
try:
cp.read([path])
except (configparser.Error, UnicodeDecodeError) as e:
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path), error=e
) from None
else:
for section in cp.sections():
config[section] = {}
for option in cp.options(section):
config_value = cp.get(section, option)
if parse_subsections and config_value.startswith('\n'):
# Then we need to parse the inner contents as
# hierarchical. We support a single level
# of nesting for now.
try:
config_value = _parse_nested(config_value)
except ValueError as e:
raise botocore.exceptions.ConfigParseError(
path=_unicode_path(path), error=e
) from None
config[section][option] = config_value
return config
def _unicode_path(path):
if isinstance(path, str):
return path
# According to the documentation getfilesystemencoding can return None
# on unix in which case the default encoding is used instead.
filesystem_encoding = sys.getfilesystemencoding()
if filesystem_encoding is None:
filesystem_encoding = sys.getdefaultencoding()
return path.decode(filesystem_encoding, 'replace')
def _parse_nested(config_value):
# Given a value like this:
# \n
# foo = bar
# bar = baz
# We need to parse this into
# {'foo': 'bar', 'bar': 'baz'}
parsed = {}
for line in config_value.splitlines():
line = line.strip()
if not line:
continue
# The caller will catch ValueError
# and raise an appropriate error
# if this fails.
key, value = line.split('=', 1)
parsed[key.strip()] = value.strip()
return parsed
def _parse_section(key, values):
result = {}
try:
parts = shlex.split(key)
except ValueError:
return result
if len(parts) == 2:
result[parts[1]] = values
return result
def build_profile_map(parsed_ini_config):
"""Convert the parsed INI config into a profile map.
The config file format requires that every profile except the
default be prepended with "profile", e.g.::
[profile test]
aws_... = foo
aws_... = bar
[profile bar]
aws_... = foo
aws_... = bar
# This is *not* a profile
[preview]
otherstuff = 1
# Neither is this
[foobar]
morestuff = 2
The build_profile_map will take a parsed INI config file where each top
level key represents a section name, and convert it into a format where all
the profiles are under a single top level "profiles" key, and each key in
the sub dictionary is a profile name. For example, the above config file
would be converted from::
{"profile test": {"aws_...": "foo", "aws...": "bar"},
"profile bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
into::
{"profiles": {"test": {"aws_...": "foo", "aws...": "bar"},
"bar": {"aws...": "foo", "aws...": "bar"},
"preview": {"otherstuff": ...},
"foobar": {"morestuff": ...},
}
If there are no profiles in the provided parsed INI contents, then
an empty dict will be the value associated with the ``profiles`` key.
.. note::
This will not mutate the passed in parsed_ini_config. Instead it will
make a deepcopy and return that value.
"""
parsed_config = copy.deepcopy(parsed_ini_config)
profiles = {}
sso_sessions = {}
services = {}
final_config = {}
for key, values in parsed_config.items():
if key.startswith("profile"):
profiles.update(_parse_section(key, values))
elif key.startswith("sso-session"):
sso_sessions.update(_parse_section(key, values))
elif key.startswith("services"):
services.update(_parse_section(key, values))
elif key == 'default':
# default section is special and is considered a profile
# name but we don't require you use 'profile "default"'
# as a section.
profiles[key] = values
else:
final_config[key] = values
final_config['profiles'] = profiles
final_config['sso_sessions'] = sso_sessions
final_config['services'] = services
return final_config
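# Illustrative sketch: a small round trip through load_config() using a
# temporary file; the contents are examples only.
if __name__ == '__main__':
    import os
    import tempfile

    contents = (
        '[default]\n'
        'region = us-east-1\n'
        '[profile dev]\n'
        'region = eu-west-1\n'
        's3 =\n'
        '    addressing_style = path\n'
    )
    with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
        f.write(contents)
        path = f.name
    try:
        loaded = load_config(path)
        print(loaded['profiles']['default']['region'])  # us-east-1
        print(loaded['profiles']['dev']['s3'])  # {'addressing_style': 'path'}
    finally:
        os.remove(path)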

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,27 @@
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# A list of auth types supported by the signers in botocore/crt/auth.py. This
# should always match the keys of botocore.crt.auth.CRT_AUTH_TYPE_MAPS. The
# information is duplicated here so that it can be accessed in environments
# where `awscrt` is not present and any import from botocore.crt.auth would
# fail.
CRT_SUPPORTED_AUTH_TYPES = (
'v4',
'v4-query',
'v4a',
's3v4',
's3v4-query',
's3v4a',
's3v4a-query',
)
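# Illustrative sketch: gate any import of botocore.crt.auth on membership in
# this tuple (or on botocore.compat.HAS_CRT) so environments without awscrt
# never attempt the import. The auth type value is an example.
if __name__ == '__main__':
    auth_type = 'v4a'
    if auth_type in CRT_SUPPORTED_AUTH_TYPES:
        print(f'{auth_type} is only available when awscrt is installed.')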

View File

@@ -0,0 +1,629 @@
# Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
from io import BytesIO
from botocore.auth import (
SIGNED_HEADERS_BLACKLIST,
STREAMING_UNSIGNED_PAYLOAD_TRAILER,
UNSIGNED_PAYLOAD,
BaseSigner,
_get_body_as_dict,
_host_from_url,
)
from botocore.compat import HTTPHeaders, awscrt, parse_qs, urlsplit, urlunsplit
from botocore.exceptions import NoCredentialsError
from botocore.utils import percent_encode_sequence
class CrtSigV4Auth(BaseSigner):
REQUIRES_REGION = True
_PRESIGNED_HEADERS_BLOCKLIST = [
'Authorization',
'X-Amz-Date',
'X-Amz-Content-SHA256',
'X-Amz-Security-Token',
]
_SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS
_USE_DOUBLE_URI_ENCODE = True
_SHOULD_NORMALIZE_URI_PATH = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
self._service_name = service_name
self._region_name = region_name
self._expiration_in_seconds = None
def _is_streaming_checksum_payload(self, request):
checksum_context = request.context.get('checksum', {})
algorithm = checksum_context.get('request_algorithm')
return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer'
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError()
# Use utcnow() because that's what gets mocked by tests, but set
# timezone because CRT assumes naive datetime is local time.
datetime_now = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc
)
# Use existing 'X-Amz-Content-SHA256' header if able
existing_sha256 = self._get_existing_sha256(request)
self._modify_request_before_signing(request)
credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
access_key_id=self.credentials.access_key,
secret_access_key=self.credentials.secret_key,
session_token=self.credentials.token,
)
if self._is_streaming_checksum_payload(request):
explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER
elif self._should_sha256_sign_payload(request):
if existing_sha256:
explicit_payload = existing_sha256
else:
explicit_payload = None # to be calculated during signing
else:
explicit_payload = UNSIGNED_PAYLOAD
if self._should_add_content_sha256_header(explicit_payload):
body_header = (
awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256
)
else:
body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE
signing_config = awscrt.auth.AwsSigningConfig(
algorithm=awscrt.auth.AwsSigningAlgorithm.V4,
signature_type=self._SIGNATURE_TYPE,
credentials_provider=credentials_provider,
region=self._region_name,
service=self._service_name,
date=datetime_now,
should_sign_header=self._should_sign_header,
use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE,
should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH,
signed_body_value=explicit_payload,
signed_body_header_type=body_header,
expiration_in_seconds=self._expiration_in_seconds,
)
crt_request = self._crt_request_from_aws_request(request)
future = awscrt.auth.aws_sign_request(crt_request, signing_config)
future.result()
self._apply_signing_changes(request, crt_request)
def _crt_request_from_aws_request(self, aws_request):
url_parts = urlsplit(aws_request.url)
crt_path = url_parts.path if url_parts.path else '/'
if aws_request.params:
array = []
for param, value in aws_request.params.items():
value = str(value)
array.append(f'{param}={value}')
crt_path = crt_path + '?' + '&'.join(array)
elif url_parts.query:
crt_path = f'{crt_path}?{url_parts.query}'
crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items())
# CRT requires body (if it exists) to be an I/O stream.
crt_body_stream = None
if aws_request.body:
if hasattr(aws_request.body, 'seek'):
crt_body_stream = aws_request.body
else:
crt_body_stream = BytesIO(aws_request.body)
crt_request = awscrt.http.HttpRequest(
method=aws_request.method,
path=crt_path,
headers=crt_headers,
body_stream=crt_body_stream,
)
return crt_request
def _apply_signing_changes(self, aws_request, signed_crt_request):
# Apply changes from signed CRT request to the AWSRequest
aws_request.headers = HTTPHeaders.from_pairs(
list(signed_crt_request.headers)
)
def _should_sign_header(self, name, **kwargs):
return name.lower() not in SIGNED_HEADERS_BLACKLIST
def _modify_request_before_signing(self, request):
# This could be a retry. Make sure the previous
# authorization headers are removed first.
for h in self._PRESIGNED_HEADERS_BLOCKLIST:
if h in request.headers:
del request.headers[h]
# If necessary, add the host header
if 'host' not in request.headers:
request.headers['host'] = _host_from_url(request.url)
def _get_existing_sha256(self, request):
return request.headers.get('X-Amz-Content-SHA256')
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def _should_add_content_sha256_header(self, explicit_payload):
# only add X-Amz-Content-SHA256 header if payload is explicitly set
return explicit_payload is not None
class CrtS3SigV4Auth(CrtSigV4Auth):
# For S3, we do not normalize the path.
_USE_DOUBLE_URI_ENCODE = False
_SHOULD_NORMALIZE_URI_PATH = False
def _get_existing_sha256(self, request):
# always recalculate
return None
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both a checksum be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# a checksum is sufficiently secure and durable for us to be
# confident in the request without body signing.
checksum_header = 'Content-MD5'
checksum_context = request.context.get('checksum', {})
algorithm = checksum_context.get('request_algorithm')
if isinstance(algorithm, dict) and algorithm.get('in') == 'header':
checksum_header = algorithm['name']
if (
not request.url.startswith('https')
or checksum_header not in request.headers
):
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super()._should_sha256_sign_payload(request)
def _should_add_content_sha256_header(self, explicit_payload):
# Always add X-Amz-Content-SHA256 header
return True
class CrtSigV4AsymAuth(BaseSigner):
REQUIRES_REGION = True
_PRESIGNED_HEADERS_BLOCKLIST = [
'Authorization',
'X-Amz-Date',
'X-Amz-Content-SHA256',
'X-Amz-Security-Token',
]
_SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_HEADERS
_USE_DOUBLE_URI_ENCODE = True
_SHOULD_NORMALIZE_URI_PATH = True
def __init__(self, credentials, service_name, region_name):
self.credentials = credentials
self._service_name = service_name
self._region_name = region_name
self._expiration_in_seconds = None
def add_auth(self, request):
if self.credentials is None:
raise NoCredentialsError()
# Use utcnow() because that's what gets mocked by tests, but set
# timezone because CRT assumes naive datetime is local time.
datetime_now = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc
)
# Use existing 'X-Amz-Content-SHA256' header if able
existing_sha256 = self._get_existing_sha256(request)
self._modify_request_before_signing(request)
credentials_provider = awscrt.auth.AwsCredentialsProvider.new_static(
access_key_id=self.credentials.access_key,
secret_access_key=self.credentials.secret_key,
session_token=self.credentials.token,
)
if self._is_streaming_checksum_payload(request):
explicit_payload = STREAMING_UNSIGNED_PAYLOAD_TRAILER
elif self._should_sha256_sign_payload(request):
if existing_sha256:
explicit_payload = existing_sha256
else:
explicit_payload = None # to be calculated during signing
else:
explicit_payload = UNSIGNED_PAYLOAD
if self._should_add_content_sha256_header(explicit_payload):
body_header = (
awscrt.auth.AwsSignedBodyHeaderType.X_AMZ_CONTENT_SHA_256
)
else:
body_header = awscrt.auth.AwsSignedBodyHeaderType.NONE
signing_config = awscrt.auth.AwsSigningConfig(
algorithm=awscrt.auth.AwsSigningAlgorithm.V4_ASYMMETRIC,
signature_type=self._SIGNATURE_TYPE,
credentials_provider=credentials_provider,
region=self._region_name,
service=self._service_name,
date=datetime_now,
should_sign_header=self._should_sign_header,
use_double_uri_encode=self._USE_DOUBLE_URI_ENCODE,
should_normalize_uri_path=self._SHOULD_NORMALIZE_URI_PATH,
signed_body_value=explicit_payload,
signed_body_header_type=body_header,
expiration_in_seconds=self._expiration_in_seconds,
)
crt_request = self._crt_request_from_aws_request(request)
future = awscrt.auth.aws_sign_request(crt_request, signing_config)
future.result()
self._apply_signing_changes(request, crt_request)
def _crt_request_from_aws_request(self, aws_request):
url_parts = urlsplit(aws_request.url)
crt_path = url_parts.path if url_parts.path else '/'
if aws_request.params:
array = []
for param, value in aws_request.params.items():
value = str(value)
array.append(f'{param}={value}')
crt_path = crt_path + '?' + '&'.join(array)
elif url_parts.query:
crt_path = f'{crt_path}?{url_parts.query}'
crt_headers = awscrt.http.HttpHeaders(aws_request.headers.items())
# CRT requires body (if it exists) to be an I/O stream.
crt_body_stream = None
if aws_request.body:
if hasattr(aws_request.body, 'seek'):
crt_body_stream = aws_request.body
else:
crt_body_stream = BytesIO(aws_request.body)
crt_request = awscrt.http.HttpRequest(
method=aws_request.method,
path=crt_path,
headers=crt_headers,
body_stream=crt_body_stream,
)
return crt_request
def _apply_signing_changes(self, aws_request, signed_crt_request):
# Apply changes from signed CRT request to the AWSRequest
aws_request.headers = HTTPHeaders.from_pairs(
list(signed_crt_request.headers)
)
def _should_sign_header(self, name, **kwargs):
return name.lower() not in SIGNED_HEADERS_BLACKLIST
def _modify_request_before_signing(self, request):
# This could be a retry. Make sure the previous
# authorization headers are removed first.
for h in self._PRESIGNED_HEADERS_BLOCKLIST:
if h in request.headers:
del request.headers[h]
# If necessary, add the host header
if 'host' not in request.headers:
request.headers['host'] = _host_from_url(request.url)
def _get_existing_sha256(self, request):
return request.headers.get('X-Amz-Content-SHA256')
def _is_streaming_checksum_payload(self, request):
checksum_context = request.context.get('checksum', {})
algorithm = checksum_context.get('request_algorithm')
return isinstance(algorithm, dict) and algorithm.get('in') == 'trailer'
def _should_sha256_sign_payload(self, request):
# Payloads will always be signed over insecure connections.
if not request.url.startswith('https'):
return True
# Certain operations may have payload signing disabled by default.
# Since we don't have access to the operation model, we pass in this
# bit of metadata through the request context.
return request.context.get('payload_signing_enabled', True)
def _should_add_content_sha256_header(self, explicit_payload):
# only add X-Amz-Content-SHA256 header if payload is explicitly set
return explicit_payload is not None
class CrtS3SigV4AsymAuth(CrtSigV4AsymAuth):
# For S3, we do not normalize the path.
_USE_DOUBLE_URI_ENCODE = False
_SHOULD_NORMALIZE_URI_PATH = False
def _get_existing_sha256(self, request):
# always recalculate
return None
def _should_sha256_sign_payload(self, request):
# S3 allows optional body signing, so to minimize the performance
# impact, we opt to not SHA256 sign the body on streaming uploads,
# provided that we're on https.
client_config = request.context.get('client_config')
s3_config = getattr(client_config, 's3', None)
# The config could be None if it isn't set, or if the customer sets it
# to None.
if s3_config is None:
s3_config = {}
# The explicit configuration takes precedence over any implicit
# configuration.
sign_payload = s3_config.get('payload_signing_enabled', None)
if sign_payload is not None:
return sign_payload
# We require that both content-md5 be present and https be enabled
# to implicitly disable body signing. The combination of TLS and
# content-md5 is sufficiently secure and durable for us to be
# confident in the request without body signing.
if (
not request.url.startswith('https')
or 'Content-MD5' not in request.headers
):
return True
# If the input is streaming we disable body signing by default.
if request.context.get('has_streaming_input', False):
return False
# If the S3-specific checks had no results, delegate to the generic
# checks.
return super()._should_sha256_sign_payload(request)
def _should_add_content_sha256_header(self, explicit_payload):
# Always add X-Amz-Content-SHA256 header
return True
class CrtSigV4AsymQueryAuth(CrtSigV4AsymAuth):
DEFAULT_EXPIRES = 3600
_SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS
def __init__(
self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES
):
super().__init__(credentials, service_name, region_name)
self._expiration_in_seconds = expires
def _modify_request_before_signing(self, request):
super()._modify_request_before_signing(request)
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
if content_type == 'application/x-www-form-urlencoded; charset=utf-8':
del request.headers['content-type']
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_string_parts = parse_qs(url_parts.query, keep_blank_values=True)
query_dict = {k: v[0] for k, v in query_string_parts.items()}
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e just keep doing
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(_get_body_as_dict(request))
request.data = ''
new_query_string = percent_encode_sequence(query_dict)
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _apply_signing_changes(self, aws_request, signed_crt_request):
# Apply changes from signed CRT request to the AWSRequest
super()._apply_signing_changes(aws_request, signed_crt_request)
signed_query = urlsplit(signed_crt_request.path).query
p = urlsplit(aws_request.url)
# urlsplit() returns a tuple (and therefore immutable) so we
# need to create new url with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4]))
class CrtS3SigV4AsymQueryAuth(CrtSigV4AsymQueryAuth):
"""S3 SigV4A auth using query parameters.
This signer will sign a request using query parameters and signature
version 4A, i.e. a "presigned url" signer.
"""
# For S3, we do not normalize the path.
_USE_DOUBLE_URI_ENCODE = False
_SHOULD_NORMALIZE_URI_PATH = False
def _should_sha256_sign_payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return False
def _should_add_content_sha256_header(self, explicit_payload):
# Never add X-Amz-Content-SHA256 header
return False
class CrtSigV4QueryAuth(CrtSigV4Auth):
DEFAULT_EXPIRES = 3600
_SIGNATURE_TYPE = awscrt.auth.AwsSignatureType.HTTP_REQUEST_QUERY_PARAMS
def __init__(
self, credentials, service_name, region_name, expires=DEFAULT_EXPIRES
):
super().__init__(credentials, service_name, region_name)
self._expiration_in_seconds = expires
def _modify_request_before_signing(self, request):
super()._modify_request_before_signing(request)
# We automatically set this header, so if it's the auto-set value we
# want to get rid of it since it doesn't make sense for presigned urls.
content_type = request.headers.get('content-type')
if content_type == 'application/x-www-form-urlencoded; charset=utf-8':
del request.headers['content-type']
# Now parse the original query string to a dict, inject our new query
# params, and serialize back to a query string.
url_parts = urlsplit(request.url)
# parse_qs makes each value a list, but in our case we know we won't
# have repeated keys so we know we have single element lists which we
# can convert back to scalar values.
query_dict = {
k: v[0]
for k, v in parse_qs(
url_parts.query, keep_blank_values=True
).items()
}
if request.params:
query_dict.update(request.params)
request.params = {}
# The spec is particular about this. It *has* to be:
# https://<endpoint>?<operation params>&<auth params>
# You can't mix the two types of params together, i.e just keep doing
# new_query_params.update(op_params)
# new_query_params.update(auth_params)
# percent_encode_sequence(new_query_params)
if request.data:
# We also need to move the body params into the query string. To
# do this, we first have to convert it to a dict.
query_dict.update(_get_body_as_dict(request))
request.data = ''
new_query_string = percent_encode_sequence(query_dict)
# url_parts is a tuple (and therefore immutable) so we need to create
# a new url_parts with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
p = url_parts
new_url_parts = (p[0], p[1], p[2], new_query_string, p[4])
request.url = urlunsplit(new_url_parts)
def _apply_signing_changes(self, aws_request, signed_crt_request):
# Apply changes from signed CRT request to the AWSRequest
super()._apply_signing_changes(aws_request, signed_crt_request)
signed_query = urlsplit(signed_crt_request.path).query
p = urlsplit(aws_request.url)
# urlsplit() returns a tuple (and therefore immutable) so we
# need to create new url with the new query string.
# <part> - <index>
# scheme - 0
# netloc - 1
# path - 2
# query - 3 <-- we're replacing this.
# fragment - 4
aws_request.url = urlunsplit((p[0], p[1], p[2], signed_query, p[4]))
class CrtS3SigV4QueryAuth(CrtSigV4QueryAuth):
"""S3 SigV4 auth using query parameters.
This signer will sign a request using query parameters and signature
version 4, i.e. a "presigned url" signer.
Based off of:
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
# For S3, we do not normalize the path.
_USE_DOUBLE_URI_ENCODE = False
_SHOULD_NORMALIZE_URI_PATH = False
def _should_sha256_sign_payload(self, request):
# From the doc link above:
# "You don't include a payload hash in the Canonical Request, because
# when you create a presigned URL, you don't know anything about the
# payload. Instead, you use a constant string "UNSIGNED-PAYLOAD".
return False
def _should_add_content_sha256_header(self, explicit_payload):
# Never add X-Amz-Content-SHA256 header
return False
# Defined at the bottom of module to ensure all Auth
# classes are defined.
CRT_AUTH_TYPE_MAPS = {
'v4': CrtSigV4Auth,
'v4-query': CrtSigV4QueryAuth,
'v4a': CrtSigV4AsymAuth,
's3v4': CrtS3SigV4Auth,
's3v4-query': CrtS3SigV4QueryAuth,
's3v4a': CrtS3SigV4AsymAuth,
's3v4a-query': CrtS3SigV4AsymQueryAuth,
}
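# Illustrative sketch: using a signer from CRT_AUTH_TYPE_MAPS directly. This
# assumes awscrt is installed; the credentials, service, region, and URL are
# dummy example values (normally the client wires all of this up).
if __name__ == '__main__':
    from botocore.awsrequest import AWSRequest
    from botocore.credentials import Credentials

    signer_cls = CRT_AUTH_TYPE_MAPS['v4']
    signer = signer_cls(
        Credentials('AKIDEXAMPLE', 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'),
        'sqs',
        'us-east-1',
    )
    request = AWSRequest(method='GET', url='https://sqs.us-east-1.amazonaws.com/')
    signer.add_auth(request)
    print(request.headers['Authorization'])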

View File

@@ -0,0 +1,292 @@
{
"definitions": {
"throttling": {
"applies_when": {
"response": {
"service_error_code": "Throttling",
"http_status_code": 400
}
}
},
"throttling_exception": {
"applies_when": {
"response": {
"service_error_code": "ThrottlingException",
"http_status_code": 400
}
}
},
"throttled_exception": {
"applies_when": {
"response": {
"service_error_code": "ThrottledException",
"http_status_code": 400
}
}
},
"request_throttled_exception": {
"applies_when": {
"response": {
"service_error_code": "RequestThrottledException",
"http_status_code": 400
}
}
},
"too_many_requests": {
"applies_when": {
"response": {
"http_status_code": 429
}
}
},
"general_socket_errors": {
"applies_when": {
"socket_errors": ["GENERAL_CONNECTION_ERROR"]
}
},
"general_server_error": {
"applies_when": {
"response": {
"http_status_code": 500
}
}
},
"bad_gateway": {
"applies_when": {
"response": {
"http_status_code": 502
}
}
},
"service_unavailable": {
"applies_when": {
"response": {
"http_status_code": 503
}
}
},
"gateway_timeout": {
"applies_when": {
"response": {
"http_status_code": 504
}
}
},
"limit_exceeded": {
"applies_when": {
"response": {
"http_status_code": 509
}
}
},
"throughput_exceeded": {
"applies_when": {
"response": {
"service_error_code": "ProvisionedThroughputExceededException",
"http_status_code": 400
}
}
}
},
"retry": {
"__default__": {
"max_attempts": 5,
"delay": {
"type": "exponential",
"base": "rand",
"growth_factor": 2
},
"policies": {
"general_socket_errors": {"$ref": "general_socket_errors"},
"general_server_error": {"$ref": "general_server_error"},
"bad_gateway": {"$ref": "bad_gateway"},
"service_unavailable": {"$ref": "service_unavailable"},
"gateway_timeout": {"$ref": "gateway_timeout"},
"limit_exceeded": {"$ref": "limit_exceeded"},
"throttling_exception": {"$ref": "throttling_exception"},
"throttled_exception": {"$ref": "throttled_exception"},
"request_throttled_exception": {"$ref": "request_throttled_exception"},
"throttling": {"$ref": "throttling"},
"too_many_requests": {"$ref": "too_many_requests"},
"throughput_exceeded": {"$ref": "throughput_exceeded"}
}
},
"organizations": {
"__default__": {
"policies": {
"too_many_requests": {
"applies_when": {
"response": {
"service_error_code": "TooManyRequestsException",
"http_status_code": 400
}
}
}
}
}
},
"dynamodb": {
"__default__": {
"max_attempts": 10,
"delay": {
"type": "exponential",
"base": 0.05,
"growth_factor": 2
},
"policies": {
"still_processing": {
"applies_when": {
"response": {
"service_error_code": "TransactionInProgressException",
"http_status_code": 400
}
}
},
"crc32": {
"applies_when": {
"response": {
"crc32body": "x-amz-crc32"
}
}
}
}
}
},
"ec2": {
"__default__": {
"policies": {
"request_limit_exceeded": {
"applies_when": {
"response": {
"service_error_code": "RequestLimitExceeded",
"http_status_code": 503
}
}
},
"ec2_throttled_exception": {
"applies_when": {
"response": {
"service_error_code": "EC2ThrottledException",
"http_status_code": 503
}
}
}
}
}
},
"cloudsearch": {
"__default__": {
"policies": {
"request_limit_exceeded": {
"applies_when": {
"response": {
"service_error_code": "BandwidthLimitExceeded",
"http_status_code": 509
}
}
}
}
}
},
"kinesis": {
"__default__": {
"policies": {
"request_limit_exceeded": {
"applies_when": {
"response": {
"service_error_code": "LimitExceededException",
"http_status_code": 400
}
}
}
}
}
},
"sqs": {
"__default__": {
"policies": {
"request_limit_exceeded": {
"applies_when": {
"response": {
"service_error_code": "RequestThrottled",
"http_status_code": 403
}
}
}
}
}
},
"s3": {
"__default__": {
"policies": {
"timeouts": {
"applies_when": {
"response": {
"http_status_code": 400,
"service_error_code": "RequestTimeout"
}
}
},
"contentmd5": {
"applies_when": {
"response": {
"http_status_code": 400,
"service_error_code": "BadDigest"
}
}
}
}
}
},
"glacier": {
"__default__": {
"policies": {
"timeouts": {
"applies_when": {
"response": {
"http_status_code": 408,
"service_error_code": "RequestTimeoutException"
}
}
}
}
}
},
"route53": {
"__default__": {
"policies": {
"request_limit_exceeded": {
"applies_when": {
"response": {
"service_error_code": "Throttling",
"http_status_code": 400
}
}
},
"still_processing": {
"applies_when": {
"response": {
"service_error_code": "PriorRequestNotComplete",
"http_status_code": 400
}
}
}
}
}
},
"sts": {
"__default__": {
"policies": {
"idp_unreachable_error": {
"applies_when": {
"response": {
"service_error_code": "IDPCommunicationError",
"http_status_code": 400
}
}
}
}
}
}
}
}
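The ``__default__`` policy above describes an exponential backoff whose base is a random value when given as ``"rand"``. A minimal sketch of the resulting delay computation (the function name ``delay_for_attempt`` is hypothetical; botocore's legacy retry handler implements the equivalent logic):

import random

def delay_for_attempt(attempt, base='rand', growth_factor=2):
    # attempt is 1-based, so the first retry waits roughly ``base`` seconds.
    if base == 'rand':
        base = random.random()
    return base * (growth_factor ** (attempt - 1))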

View File

@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}

View File

@@ -0,0 +1,64 @@
{
"pagination": {
"ListAnalyzedResources": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "analyzedResources"
},
"ListAnalyzers": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "analyzers"
},
"ListArchiveRules": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "archiveRules"
},
"ListFindings": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "findings"
},
"ListAccessPreviewFindings": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "findings"
},
"ListAccessPreviews": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "accessPreviews"
},
"ValidatePolicy": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "findings"
},
"ListPolicyGenerations": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "policyGenerations"
},
"GetFindingV2": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "findingDetails"
},
"ListFindingsV2": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "findings"
}
}
}
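Each entry above tells the paginator which request field carries the continuation token (``input_token``), where the next token appears in the response (``output_token``), the page-size parameter (``limit_key``), and which response key holds the items (``result_key``). A minimal sketch of driving one of these entries through a botocore client (the region and analyzer ARN are placeholder values):

import botocore.session

session = botocore.session.get_session()
client = session.create_client('accessanalyzer', region_name='us-east-1')
paginator = client.get_paginator('list_findings')
findings = []
for page in paginator.paginate(
    analyzerArn='arn:aws:access-analyzer:us-east-1:111122223333:analyzer/example'
):
    findings.extend(page['findings'])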

View File

@@ -0,0 +1,21 @@
{
"version": 1.0,
"merge": {
"pagination": {
"GetFindingV2": {
"non_aggregate_keys": [
"resource",
"status",
"error",
"createdAt",
"resourceType",
"findingType",
"resourceOwnerAccount",
"analyzedAt",
"id",
"updatedAt"
]
}
}
}
}

View File

@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}

View File

@@ -0,0 +1,10 @@
{
"pagination": {
"ListRegions": {
"input_token": "NextToken",
"output_token": "NextToken",
"limit_key": "MaxResults",
"result_key": "Regions"
}
}
}

View File

@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}

View File

@@ -0,0 +1,22 @@
{
"pagination": {
"ListCertificateAuthorities": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "CertificateAuthorities"
},
"ListTags": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "Tags"
},
"ListPermissions": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "Permissions"
}
}
}

View File

@@ -0,0 +1,61 @@
{
"version": 2,
"waiters": {
"CertificateAuthorityCSRCreated": {
"description": "Wait until a Certificate Authority CSR is created",
"operation": "GetCertificateAuthorityCsr",
"delay": 3,
"maxAttempts": 60,
"acceptors": [
{
"state": "success",
"matcher": "status",
"expected": 200
},
{
"state": "retry",
"matcher": "error",
"expected": "RequestInProgressException"
}
]
},
"CertificateIssued": {
"description": "Wait until a certificate is issued",
"operation": "GetCertificate",
"delay": 1,
"maxAttempts": 60,
"acceptors": [
{
"state": "success",
"matcher": "status",
"expected": 200
},
{
"state": "retry",
"matcher": "error",
"expected": "RequestInProgressException"
}
]
},
"AuditReportCreated": {
"description": "Wait until a Audit Report is created",
"operation": "DescribeCertificateAuthorityAuditReport",
"delay": 3,
"maxAttempts": 60,
"acceptors": [
{
"state": "success",
"matcher": "path",
"argument": "AuditReportStatus",
"expected": "SUCCESS"
},
{
"state": "failure",
"matcher": "path",
"argument": "AuditReportStatus",
"expected": "FAILED"
}
]
}
}
}

View File

@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}

View File

@@ -0,0 +1,10 @@
{
"pagination": {
"ListCertificates": {
"input_token": "NextToken",
"output_token": "NextToken",
"limit_key": "MaxItems",
"result_key": "CertificateSummaryList"
}
}
}

View File

@@ -0,0 +1,35 @@
{
"version": 2,
"waiters": {
"CertificateValidated": {
"delay": 60,
"maxAttempts": 40,
"operation": "DescribeCertificate",
"acceptors": [
{
"matcher": "pathAll",
"expected": "SUCCESS",
"argument": "Certificate.DomainValidationOptions[].ValidationStatus",
"state": "success"
},
{
"matcher": "pathAny",
"expected": "PENDING_VALIDATION",
"argument": "Certificate.DomainValidationOptions[].ValidationStatus",
"state": "retry"
},
{
"matcher": "path",
"expected": "FAILED",
"argument": "Certificate.Status",
"state": "failure"
},
{
"matcher": "error",
"expected": "ResourceNotFoundException",
"state": "failure"
}
]
}
}
}
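The ``CertificateValidated`` waiter above polls ``DescribeCertificate`` every 60 seconds for up to 40 attempts, succeeding only when every domain validation reports ``SUCCESS``. A minimal sketch of invoking it through a botocore client (the region and certificate ARN are placeholder values):

import botocore.session

session = botocore.session.get_session()
acm = session.create_client('acm', region_name='us-east-1')
waiter = acm.get_waiter('certificate_validated')
waiter.wait(
    CertificateArn='arn:aws:acm:us-east-1:111122223333:certificate/example'
)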

View File

@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}

View File

@@ -0,0 +1,82 @@
{
"pagination": {
"ListSkills": {
"result_key": "SkillSummaries",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"SearchUsers": {
"result_key": "Users",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"ListTags": {
"result_key": "Tags",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"SearchProfiles": {
"result_key": "Profiles",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"SearchSkillGroups": {
"result_key": "SkillGroups",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"SearchDevices": {
"result_key": "Devices",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"SearchRooms": {
"result_key": "Rooms",
"output_token": "NextToken",
"input_token": "NextToken",
"limit_key": "MaxResults"
},
"ListBusinessReportSchedules": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "BusinessReportSchedules"
},
"ListConferenceProviders": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "ConferenceProviders"
},
"ListDeviceEvents": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "DeviceEvents"
},
"ListSkillsStoreCategories": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "CategoryList"
},
"ListSkillsStoreSkillsByCategory": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "SkillsStoreSkills"
},
"ListSmartHomeAppliances": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"output_token": "NextToken",
"result_key": "SmartHomeAppliances"
}
}
}


@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}


@@ -0,0 +1,22 @@
{
"pagination": {
"ListWorkspaces": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "workspaces"
},
"ListRuleGroupsNamespaces": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "ruleGroupsNamespaces"
},
"ListScrapers": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "scrapers"
}
}
}


@@ -0,0 +1,76 @@
{
"version" : 2,
"waiters" : {
"ScraperActive" : {
"description" : "Wait until a scraper reaches ACTIVE status",
"delay" : 2,
"maxAttempts" : 60,
"operation" : "DescribeScraper",
"acceptors" : [ {
"matcher" : "path",
"argument" : "scraper.status.statusCode",
"state" : "success",
"expected" : "ACTIVE"
}, {
"matcher" : "path",
"argument" : "scraper.status.statusCode",
"state" : "failure",
"expected" : "CREATION_FAILED"
} ]
},
"ScraperDeleted" : {
"description" : "Wait until a scraper reaches DELETED status",
"delay" : 2,
"maxAttempts" : 60,
"operation" : "DescribeScraper",
"acceptors" : [ {
"matcher" : "error",
"state" : "success",
"expected" : "ResourceNotFoundException"
}, {
"matcher" : "path",
"argument" : "scraper.status.statusCode",
"state" : "failure",
"expected" : "DELETION_FAILED"
} ]
},
"WorkspaceActive" : {
"description" : "Wait until a workspace reaches ACTIVE status",
"delay" : 2,
"maxAttempts" : 60,
"operation" : "DescribeWorkspace",
"acceptors" : [ {
"matcher" : "path",
"argument" : "workspace.status.statusCode",
"state" : "success",
"expected" : "ACTIVE"
}, {
"matcher" : "path",
"argument" : "workspace.status.statusCode",
"state" : "retry",
"expected" : "UPDATING"
}, {
"matcher" : "path",
"argument" : "workspace.status.statusCode",
"state" : "retry",
"expected" : "CREATING"
} ]
},
"WorkspaceDeleted" : {
"description" : "Wait until a workspace reaches DELETED status",
"delay" : 2,
"maxAttempts" : 60,
"operation" : "DescribeWorkspace",
"acceptors" : [ {
"matcher" : "error",
"state" : "success",
"expected" : "ResourceNotFoundException"
}, {
"matcher" : "path",
"argument" : "workspace.status.statusCode",
"state" : "retry",
"expected" : "DELETING"
} ]
}
}
}
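
The deletion waiters here invert the usual error handling: an "error" matcher with state "success" treats ResourceNotFoundException as the terminal condition, so the waiter returns once the describe call can no longer find the resource, while a DELETING status simply retries. A minimal sketch, assuming this file belongs to Amazon Managed Service for Prometheus (the amp client) and the workspace id is a placeholder:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("amp", region_name="us-east-1")

client.delete_workspace(workspaceId="ws-example")

# Returns once DescribeWorkspace raises ResourceNotFoundException; while the
# workspace still reports DELETING, the waiter keeps polling every 2 seconds.
client.get_waiter("workspace_deleted").wait(workspaceId="ws-example")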


@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}


@@ -0,0 +1,28 @@
{
"pagination": {
"ListApps": {
"input_token": "nextToken",
"limit_key": "maxResults",
"output_token": "nextToken",
"result_key": "apps"
},
"ListBranches": {
"input_token": "nextToken",
"limit_key": "maxResults",
"output_token": "nextToken",
"result_key": "branches"
},
"ListDomainAssociations": {
"input_token": "nextToken",
"limit_key": "maxResults",
"output_token": "nextToken",
"result_key": "domainAssociations"
},
"ListJobs": {
"input_token": "nextToken",
"limit_key": "maxResults",
"output_token": "nextToken",
"result_key": "jobSummaries"
}
}
}


@@ -0,0 +1,10 @@
{
"pagination": {
"ListBackendJobs": {
"input_token": "NextToken",
"output_token": "NextToken",
"limit_key": "MaxResults",
"result_key": "Jobs"
}
}
}


@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}


@@ -0,0 +1,43 @@
{
"pagination": {
"ListComponents": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "entities"
},
"ListThemes": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "entities"
},
"ExportComponents": {
"input_token": "nextToken",
"output_token": "nextToken",
"result_key": "entities"
},
"ExportThemes": {
"input_token": "nextToken",
"output_token": "nextToken",
"result_key": "entities"
},
"ExportForms": {
"input_token": "nextToken",
"output_token": "nextToken",
"result_key": "entities"
},
"ListForms": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "entities"
},
"ListCodegenJobs": {
"input_token": "nextToken",
"output_token": "nextToken",
"limit_key": "maxResults",
"result_key": "entities"
}
}
}
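
Note that the Export* entries above define no limit_key, so there is no page-size parameter to tune; the paginator only threads nextToken and accumulates the shared "entities" result key. A minimal sketch, assuming this file is for Amplify UI Builder and the app id and environment name are placeholders:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("amplifyuibuilder", region_name="us-east-1")
paginator = client.get_paginator("export_components")

# No limit_key is defined for the Export* operations, so PageSize cannot be
# set; the paginator simply follows nextToken and extends "entities".
pages = paginator.paginate(appId="example-app-id", environmentName="staging")
print(len(pages.build_full_result()["entities"]))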


@@ -0,0 +1,5 @@
{
"version": 2,
"waiters": {
}
}


@@ -0,0 +1,5 @@
{
"version": "1.0",
"examples": {
}
}


@@ -0,0 +1,117 @@
{
"pagination": {
"GetApiKeys": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetBasePathMappings": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetClientCertificates": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetDeployments": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetDomainNames": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetModels": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetResources": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetRestApis": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetUsage": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items",
"non_aggregate_keys": [
"usagePlanId",
"startDate",
"endDate"
]
},
"GetUsagePlans": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetUsagePlanKeys": {
"input_token": "position",
"output_token": "position",
"limit_key": "limit",
"result_key": "items"
},
"GetVpcLinks": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetAuthorizers": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetDocumentationParts": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetDocumentationVersions": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetGatewayResponses": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetRequestValidators": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
},
"GetSdkTypes": {
"input_token": "position",
"limit_key": "limit",
"output_token": "position",
"result_key": "items"
}
}
}
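
API Gateway uses "position" and "limit" in place of the more common NextToken/MaxResults names, and GetUsage is the one entry with non_aggregate_keys: usagePlanId, startDate, and endDate describe the whole report, so they are not accumulated the way the paged "items" map is. A minimal sketch, assuming this file is for API Gateway and the usage plan id and date range are placeholders:

import botocore.session

session = botocore.session.get_session()
client = session.create_client("apigateway", region_name="us-east-1")
paginator = client.get_paginator("get_usage")

pages = paginator.paginate(
    usagePlanId="example-plan-id",
    startDate="2024-01-01",
    endDate="2024-01-31",
)

# Every page repeats the non-aggregate usagePlanId/startDate/endDate fields
# and contributes its slice of the per-API-key "items" usage map.
for page in pages:
    for key_id, usage in page.get("items", {}).items():
        print(page["startDate"], page["endDate"], key_id, usage)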

Some files were not shown because too many files have changed in this diff.