diff --git a/.cfnlintrc.yaml b/.cfnlintrc.yaml index a5a54820e..8b0d3c164 100644 --- a/.cfnlintrc.yaml +++ b/.cfnlintrc.yaml @@ -131,11 +131,16 @@ ignore_templates: - tests/translator/output/**/function_with_mq.json # Property "EventSourceArn" can Fn::GetAtt to a resource of types [AWS::DynamoDB::GlobalTable, AWS::DynamoDB::Table, AWS::Kinesis::Stream, AWS::Kinesis::StreamConsumer, AWS::SQS::Queue] - tests/translator/output/**/function_with_mq_using_autogen_role.json # Property "EventSourceArn" can Fn::GetAtt to a resource of types [AWS::DynamoDB::GlobalTable, AWS::DynamoDB::Table, AWS::Kinesis::Stream, AWS::Kinesis::StreamConsumer, AWS::SQS::Queue] - tests/translator/output/**/function_with_recursive_loop.json # Invalid Property Resources/RecursiveLoopParameterFunction/Properties/RecursiveLoop + - tests/translator/output/**/function_with_sourcekmskeyarn.json # Invalid Property Resources/SourceKMSKeyArnParameterFunction/Properties/SourceKMSKeyArn - tests/translator/output/**/function_with_tracing.json # Obsolete DependsOn on resource - tests/translator/output/**/api_with_propagate_tags.json # TODO: Intentional error transform tests. Will be updated. - tests/translator/output/**/function_with_intrinsics_resource_attribute.json # CFN now supports intrinsics in DeletionPolicy + - tests/translator/output/**/function_with_snapstart.json # SnapStart is intentionally not attached to a Lambda version, which causes lint issues + - tests/translator/output/**/managed_policies_everything.json # intentionally contains invalid ARNs ignore_checks: - E2531 # Deprecated runtime; not relevant for transform tests + - E2533 # Another deprecated runtime; not relevant for transform tests - W2531 # EOL runtime; not relevant for transform tests - E3001 # Invalid or unsupported Type; common in transform tests since they focus on SAM resources - W2001 # Parameter not used + - E3006 # Resource type check; we have some Foo Bar resources diff --git a/DEVELOPMENT_GUIDE.md b/DEVELOPMENT_GUIDE.md index 2a89debf6..10fcec056 100644 --- a/DEVELOPMENT_GUIDE.md +++ b/DEVELOPMENT_GUIDE.md @@ -59,10 +59,10 @@ We format our code using [Black](https://github.com/python/black) and verify the during PR checks. Black will be installed automatically with `make init`. 
After installing, you can run our formatting through our Makefile by `make format` or integrating Black directly in your favorite IDE (instructions -can be found [here](https://black.readthedocs.io/en/stable/editor_integration.html)) +can be found [here](https://black.readthedocs.io/en/stable/integrations/editors.html)) ##### (Workaround) Integrating Black directly in your favorite IDE -Since black is installed in virtualenv, when you follow [this instruction](https://black.readthedocs.io/en/stable/editor_integration.html), `which black` might give you this +Since black is installed in virtualenv, when you follow [this instruction](https://black.readthedocs.io/en/stable/integrations/editors.html), `which black` might give you this ```bash (sam38) $ where black diff --git a/bin/add_transform_test.py b/bin/add_transform_test.py index 99139b74e..ec85b53c9 100755 --- a/bin/add_transform_test.py +++ b/bin/add_transform_test.py @@ -68,8 +68,11 @@ def get_input_file_path() -> Path: def copy_input_file_to_transform_test_dir(input_file_path: Path, transform_test_input_path: Path) -> None: - shutil.copyfile(input_file_path, transform_test_input_path) - print(f"Transform Test input file generated {transform_test_input_path}") + try: + shutil.copyfile(input_file_path, transform_test_input_path) + print(f"Transform Test input file generated {transform_test_input_path}") + except shutil.SameFileError: + print(f"Source and destination are the same file: {input_file_path}") def verify_input_template(input_file_path: Path) -> None: @@ -99,7 +104,9 @@ def main() -> None: verify_input_template(input_file_path) transform_test_input_path = TRANSFORM_TEST_DIR / "input" / (file_basename + ".yaml") - copy_input_file_to_transform_test_dir(input_file_path, transform_test_input_path) + # copy only if the template is not already in the transform test input dir + if input_file_path != transform_test_input_path: + copy_input_file_to_transform_test_dir(input_file_path, transform_test_input_path) generate_transform_test_output_files(transform_test_input_path, file_basename) diff --git a/bin/run_cfn_lint.sh b/bin/run_cfn_lint.sh index 4157af06e..eeee7e483 100755 --- a/bin/run_cfn_lint.sh +++ b/bin/run_cfn_lint.sh @@ -9,7 +9,7 @@ if [ ! 
-d "${VENV}" ]; then python3 -m venv "${VENV}" fi -"${VENV}/bin/python" -m pip install cfn-lint==0.75.0 --upgrade --quiet +"${VENV}/bin/python" -m pip install cfn-lint --upgrade --quiet # update cfn schema "${VENV}/bin/cfn-lint" -u "${VENV}/bin/cfn-lint" --format parseable diff --git a/integration/resources/templates/combination/all_policy_templates.yaml b/integration/resources/templates/combination/all_policy_templates.yaml index e571be561..cf8ffc7d7 100644 --- a/integration/resources/templates/combination/all_policy_templates.yaml +++ b/integration/resources/templates/combination/all_policy_templates.yaml @@ -8,7 +8,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SQSPollerPolicy: @@ -123,7 +123,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SESEmailTemplateCrudPolicy: {} @@ -187,7 +187,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - ElasticMapReduceModifyInstanceFleetPolicy: ClusterId: name diff --git a/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml b/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml index fe2fd256f..f3c42ae14 100644 --- a/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml +++ b/integration/resources/templates/combination/api_with_authorizers_invokefunction_set_none.yaml @@ -12,7 +12,7 @@ Resources: InlineCode: | print("hello") Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Events: API3: Type: Api diff --git a/integration/resources/templates/combination/api_with_authorizers_max.yaml b/integration/resources/templates/combination/api_with_authorizers_max.yaml index 68ac6f78b..4e1062601 100644 --- a/integration/resources/templates/combination/api_with_authorizers_max.yaml +++ b/integration/resources/templates/combination/api_with_authorizers_max.yaml @@ -125,8 +125,8 @@ Resources: Type: AWS::IAM::Role Properties: ManagedPolicyArns: - - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole - - arn:aws:iam::aws:policy/service-role/AWSLambdaRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaRole AssumeRolePolicyDocument: Version: '2012-10-17' Statement: diff --git a/integration/resources/templates/combination/api_with_authorizers_max_openapi.yaml b/integration/resources/templates/combination/api_with_authorizers_max_openapi.yaml index 008d314a9..3066930c2 100644 --- a/integration/resources/templates/combination/api_with_authorizers_max_openapi.yaml +++ b/integration/resources/templates/combination/api_with_authorizers_max_openapi.yaml @@ -138,8 +138,8 @@ Resources: Type: AWS::IAM::Role Properties: ManagedPolicyArns: - - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole - - arn:aws:iam::aws:policy/service-role/AWSLambdaRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaRole AssumeRolePolicyDocument: Version: '2012-10-17' Statement: diff --git a/integration/resources/templates/combination/connector_function_to_location_place_index.yaml b/integration/resources/templates/combination/connector_function_to_location_place_index.yaml index 52c21c0a6..ba7b579e8 
100644 --- a/integration/resources/templates/combination/connector_function_to_location_place_index.yaml +++ b/integration/resources/templates/combination/connector_function_to_location_place_index.yaml @@ -9,7 +9,7 @@ Resources: TriggerFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: index.handler InlineCode: | import boto3 diff --git a/integration/resources/templates/combination/depends_on.yaml b/integration/resources/templates/combination/depends_on.yaml index 036b9b939..d1949b3b2 100644 --- a/integration/resources/templates/combination/depends_on.yaml +++ b/integration/resources/templates/combination/depends_on.yaml @@ -12,7 +12,7 @@ Resources: Role: Fn::GetAtt: LambdaRole.Arn Handler: lambda_function.lambda_handler - Runtime: python3.8 + Runtime: python3.11 Timeout: 15 CodeUri: ${codeuri} diff --git a/integration/resources/templates/combination/function_with_custom_code_deploy.yaml b/integration/resources/templates/combination/function_with_custom_code_deploy.yaml index b05ad644b..d85650312 100644 --- a/integration/resources/templates/combination/function_with_custom_code_deploy.yaml +++ b/integration/resources/templates/combination/function_with_custom_code_deploy.yaml @@ -10,7 +10,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live diff --git a/integration/resources/templates/combination/function_with_deployment_basic.yaml b/integration/resources/templates/combination/function_with_deployment_basic.yaml index 4fc5f175b..f02fe7bad 100644 --- a/integration/resources/templates/combination/function_with_deployment_basic.yaml +++ b/integration/resources/templates/combination/function_with_deployment_basic.yaml @@ -5,7 +5,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live diff --git a/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml b/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml index 7f4bb115f..3540d5d9f 100644 --- a/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml +++ b/integration/resources/templates/combination/function_with_deployment_default_role_managed_policy.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live DeploymentPreference: Type: Canary10Percent5Minutes diff --git a/integration/resources/templates/combination/function_with_deployment_globals.yaml b/integration/resources/templates/combination/function_with_deployment_globals.yaml index 8b8c88e8d..eb0708dd3 100644 --- a/integration/resources/templates/combination/function_with_deployment_globals.yaml +++ b/integration/resources/templates/combination/function_with_deployment_globals.yaml @@ -16,7 +16,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live DeploymentRole: diff --git a/integration/resources/templates/combination/function_with_http_api.yaml b/integration/resources/templates/combination/function_with_http_api.yaml index ae453c88c..0911ddcc9 100644 --- a/integration/resources/templates/combination/function_with_http_api.yaml +++ b/integration/resources/templates/combination/function_with_http_api.yaml @@ -3,7 +3,7 @@ Resources: Type: 
AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_http_api_default_path.yaml b/integration/resources/templates/combination/function_with_http_api_default_path.yaml index 469e50a9e..bb3234377 100644 --- a/integration/resources/templates/combination/function_with_http_api_default_path.yaml +++ b/integration/resources/templates/combination/function_with_http_api_default_path.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_implicit_http_api.yaml b/integration/resources/templates/combination/function_with_implicit_http_api.yaml index 3da86931b..441217886 100644 --- a/integration/resources/templates/combination/function_with_implicit_http_api.yaml +++ b/integration/resources/templates/combination/function_with_implicit_http_api.yaml @@ -4,7 +4,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/function_with_mq.yaml b/integration/resources/templates/combination/function_with_mq.yaml index fdff7942e..7efeaae4b 100644 --- a/integration/resources/templates/combination/function_with_mq.yaml +++ b/integration/resources/templates/combination/function_with_mq.yaml @@ -101,7 +101,8 @@ Resources: secretsmanager:GetSecretValue] Effect: Allow Resource: '*' - ManagedPolicyArns: [arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole] + ManagedPolicyArns: + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole Tags: - {Value: SAM, Key: lambda:createdBy} diff --git a/integration/resources/templates/combination/function_with_msk.yaml b/integration/resources/templates/combination/function_with_msk.yaml index a15e99b57..5f6eecca8 100644 --- a/integration/resources/templates/combination/function_with_msk.yaml +++ b/integration/resources/templates/combination/function_with_msk.yaml @@ -27,7 +27,8 @@ Resources: logs:CreateLogStream, logs:PutLogEvents] Effect: Allow Resource: '*' - ManagedPolicyArns: [arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole] + ManagedPolicyArns: + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole Tags: - {Value: SAM, Key: lambda:createdBy} diff --git a/integration/resources/templates/combination/function_with_msk_trigger_and_s3_onfailure_events_destinations.yaml b/integration/resources/templates/combination/function_with_msk_trigger_and_s3_onfailure_events_destinations.yaml index 6089c08c0..7ff0e13f2 100644 --- a/integration/resources/templates/combination/function_with_msk_trigger_and_s3_onfailure_events_destinations.yaml +++ b/integration/resources/templates/combination/function_with_msk_trigger_and_s3_onfailure_events_destinations.yaml @@ -27,7 +27,8 @@ Resources: logs:CreateLogStream, logs:PutLogEvents, s3:ListBucket] Effect: Allow Resource: '*' - ManagedPolicyArns: [arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole] + ManagedPolicyArns: + - !Sub 
arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole Tags: - {Value: SAM, Key: lambda:createdBy} diff --git a/integration/resources/templates/combination/function_with_policy_templates.yaml b/integration/resources/templates/combination/function_with_policy_templates.yaml index bc7b99e09..08d73a8f1 100644 --- a/integration/resources/templates/combination/function_with_policy_templates.yaml +++ b/integration/resources/templates/combination/function_with_policy_templates.yaml @@ -14,7 +14,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 Policies: - SQSPollerPolicy: QueueName: diff --git a/integration/resources/templates/combination/function_with_resource_refs.yaml b/integration/resources/templates/combination/function_with_resource_refs.yaml index 4ce85a020..e4c1b9aae 100644 --- a/integration/resources/templates/combination/function_with_resource_refs.yaml +++ b/integration/resources/templates/combination/function_with_resource_refs.yaml @@ -10,14 +10,14 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: Live MyOtherFunction: Type: AWS::Serverless::Function Properties: CodeUri: ${codeuri} - Runtime: python3.8 + Runtime: python3.11 Handler: hello.handler Environment: Variables: diff --git a/integration/resources/templates/combination/http_api_with_auth.yaml b/integration/resources/templates/combination/http_api_with_auth.yaml index 9aa90a092..3eb0eedb7 100644 --- a/integration/resources/templates/combination/http_api_with_auth.yaml +++ b/integration/resources/templates/combination/http_api_with_auth.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/http_api_with_auth_updated.yaml b/integration/resources/templates/combination/http_api_with_auth_updated.yaml index a1b40ab53..bfc35137e 100644 --- a/integration/resources/templates/combination/http_api_with_auth_updated.yaml +++ b/integration/resources/templates/combination/http_api_with_auth_updated.yaml @@ -3,7 +3,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 InlineCode: | def handler(event, context): return {'body': 'Hello World!', 'statusCode': 200} diff --git a/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml b/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml index a9b1800a0..d54723d42 100644 --- a/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml +++ b/integration/resources/templates/combination/http_api_with_fail_on_warnings_and_default_stage_name.yaml @@ -16,7 +16,7 @@ Resources: InlineCode: | def handler(event, context): print("Hello, world!") - Runtime: python3.8 + Runtime: python3.11 Architectures: - x86_64 Events: diff --git a/integration/resources/templates/combination/state_machine_with_policy_templates.yaml b/integration/resources/templates/combination/state_machine_with_policy_templates.yaml index 967c8a477..2cf8fe0f0 100644 --- a/integration/resources/templates/combination/state_machine_with_policy_templates.yaml +++ 
b/integration/resources/templates/combination/state_machine_with_policy_templates.yaml @@ -24,7 +24,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 MyQueue: Type: AWS::SQS::Queue diff --git a/integration/resources/templates/single/basic_api_with_mode.yaml b/integration/resources/templates/single/basic_api_with_mode.yaml index 2fd8ee154..bf0f60ee1 100644 --- a/integration/resources/templates/single/basic_api_with_mode.yaml +++ b/integration/resources/templates/single/basic_api_with_mode.yaml @@ -9,7 +9,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live InlineCode: | import json @@ -31,6 +31,6 @@ Resources: Outputs: ApiEndpoint: - Value: !Sub "https://${MyApi}.execute-api.${AWS::Region}.amazonaws.com/MyNewStageName" + Value: !Sub "https://${MyApi}.execute-api.${AWS::Region}.${AWS::URLSuffix}/MyNewStageName" Metadata: SamTransformTest: true diff --git a/integration/resources/templates/single/basic_api_with_mode_update.yaml b/integration/resources/templates/single/basic_api_with_mode_update.yaml index 5b6376cbf..4810f2526 100644 --- a/integration/resources/templates/single/basic_api_with_mode_update.yaml +++ b/integration/resources/templates/single/basic_api_with_mode_update.yaml @@ -9,7 +9,7 @@ Resources: Type: AWS::Serverless::Function Properties: Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live InlineCode: | def handler(event, context): diff --git a/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml b/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml index a736b3e1d..efd2f8d75 100644 --- a/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml +++ b/integration/resources/templates/single/function_with_deployment_preference_alarms_intrinsic_if.yaml @@ -9,7 +9,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live DeploymentPreference: Type: Linear10PercentEvery3Minutes diff --git a/integration/resources/templates/single/function_with_role_path.yaml b/integration/resources/templates/single/function_with_role_path.yaml index fb55ea958..e6e9dca2f 100644 --- a/integration/resources/templates/single/function_with_role_path.yaml +++ b/integration/resources/templates/single/function_with_role_path.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: ${codeuri} Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RolePath: /foo/bar/ Metadata: SamTransformTest: true diff --git a/integration/resources/templates/single/state_machine_with_api.yaml b/integration/resources/templates/single/state_machine_with_api.yaml index 8b3b5d58c..c72a66098 100644 --- a/integration/resources/templates/single/state_machine_with_api.yaml +++ b/integration/resources/templates/single/state_machine_with_api.yaml @@ -14,12 +14,12 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: Policies: - - arn:aws:iam::aws:policy/AWSLambda_FullAccess + - !Sub arn:${AWS::Partition}:iam::aws:policy/AWSLambda_FullAccess Definition: StartAt: One States: diff --git a/samtranslator/__init__.py b/samtranslator/__init__.py index eb97acb8c..e70b96d07 100644 --- a/samtranslator/__init__.py +++ 
b/samtranslator/__init__.py @@ -1 +1 @@ -__version__ = "1.91.0" +__version__ = "1.92.0" diff --git a/samtranslator/internal/schema_source/aws_serverless_function.py b/samtranslator/internal/schema_source/aws_serverless_function.py index 74c252dce..bd0740e55 100644 --- a/samtranslator/internal/schema_source/aws_serverless_function.py +++ b/samtranslator/internal/schema_source/aws_serverless_function.py @@ -513,6 +513,7 @@ class ScheduleV2Event(BaseModel): RuntimeManagementConfig = Optional[PassThroughProp] # TODO: check the type LoggingConfig = Optional[PassThroughProp] # TODO: add documentation RecursiveLoop = Optional[PassThroughProp] +SourceKMSKeyArn = Optional[PassThroughProp] class Properties(BaseModel): @@ -640,6 +641,7 @@ class Properties(BaseModel): VpcConfig: Optional[VpcConfig] = prop("VpcConfig") LoggingConfig: Optional[PassThroughProp] # TODO: add documentation RecursiveLoop: Optional[PassThroughProp] # TODO: add documentation + SourceKMSKeyArn: Optional[PassThroughProp] # TODO: add documentation class Globals(BaseModel): @@ -699,6 +701,7 @@ class Globals(BaseModel): RuntimeManagementConfig: Optional[RuntimeManagementConfig] = prop("RuntimeManagementConfig") LoggingConfig: Optional[PassThroughProp] # TODO: add documentation RecursiveLoop: Optional[PassThroughProp] # TODO: add documentation + SourceKMSKeyArn: Optional[PassThroughProp] # TODO: add documentation class Resource(ResourceAttributes): diff --git a/samtranslator/internal/schema_source/aws_serverless_statemachine.py b/samtranslator/internal/schema_source/aws_serverless_statemachine.py index 9e6a549e9..355064767 100644 --- a/samtranslator/internal/schema_source/aws_serverless_statemachine.py +++ b/samtranslator/internal/schema_source/aws_serverless_statemachine.py @@ -174,6 +174,7 @@ class Properties(BaseModel): Type: Optional[PassThroughProp] = properties("Type") AutoPublishAlias: Optional[PassThroughProp] DeploymentPreference: Optional[PassThroughProp] + UseAliasAsEventTarget: Optional[bool] class Resource(ResourceAttributes): diff --git a/samtranslator/model/api/api_generator.py b/samtranslator/model/api/api_generator.py index dc1f5029b..92a360c11 100644 --- a/samtranslator/model/api/api_generator.py +++ b/samtranslator/model/api/api_generator.py @@ -750,8 +750,9 @@ def _add_cors(self) -> None: properties = CorsProperties(AllowOrigin=self.cors) # type: ignore[call-arg] elif isinstance(self.cors, dict): # Make sure keys in the dict are recognized - if not all(key in CorsProperties._fields for key in self.cors): - raise InvalidResourceException(self.logical_id, INVALID_ERROR) + for key in self.cors: + if key not in CorsProperties._fields: + raise InvalidResourceException(self.logical_id, f"Invalid key '{key}' for 'Cors' property.") properties = CorsProperties(**self.cors) diff --git a/samtranslator/model/api/http_api_generator.py b/samtranslator/model/api/http_api_generator.py index 6ce507137..1096fbd8f 100644 --- a/samtranslator/model/api/http_api_generator.py +++ b/samtranslator/model/api/http_api_generator.py @@ -187,8 +187,9 @@ def _add_cors(self) -> None: elif isinstance(self.cors_configuration, dict): # Make sure keys in the dict are recognized - if not all(key in CorsProperties._fields for key in self.cors_configuration): - raise InvalidResourceException(self.logical_id, "Invalid value for 'Cors' property.") + for key in self.cors_configuration: + if key not in CorsProperties._fields: + raise InvalidResourceException(self.logical_id, f"Invalid key '{key}' for 'Cors' property.") properties = 
CorsProperties(**self.cors_configuration) diff --git a/samtranslator/model/sam_resources.py b/samtranslator/model/sam_resources.py index ce8689cb4..ab06a0bb6 100644 --- a/samtranslator/model/sam_resources.py +++ b/samtranslator/model/sam_resources.py @@ -180,6 +180,7 @@ class SamFunction(SamResourceMacro): "RuntimeManagementConfig": PassThroughProperty(False), "LoggingConfig": PassThroughProperty(False), "RecursiveLoop": PassThroughProperty(False), + "SourceKMSKeyArn": PassThroughProperty(False), } FunctionName: Optional[Intrinsicable[str]] @@ -223,6 +224,7 @@ class SamFunction(SamResourceMacro): FunctionUrlConfig: Optional[Dict[str, Any]] LoggingConfig: Optional[Dict[str, Any]] RecursiveLoop: Optional[str] + SourceKMSKeyArn: Optional[str] event_resolver = ResourceTypeResolver( samtranslator.model.eventsources, @@ -880,7 +882,10 @@ def _construct_inline_code(*args: Any, **kwargs: Dict[str, Any]) -> Dict[str, An else: raise InvalidResourceException(self.logical_id, "Either 'InlineCode' or 'CodeUri' must be set.") dispatch_function: Callable[..., Dict[str, Any]] = artifact_dispatch[filtered_key] - return dispatch_function(artifacts[filtered_key], self.logical_id, filtered_key) + code_dict = dispatch_function(artifacts[filtered_key], self.logical_id, filtered_key) + if self.SourceKMSKeyArn and packagetype == ZIP: + code_dict["SourceKMSKeyArn"] = self.SourceKMSKeyArn + return code_dict def _construct_version( self, function: LambdaFunction, intrinsics_resolver: IntrinsicsResolver, code_sha256: Optional[str] = None @@ -1781,6 +1786,7 @@ class SamStateMachine(SamResourceMacro): "PermissionsBoundary": PropertyType(False, IS_STR), "AutoPublishAlias": PassThroughProperty(False), "DeploymentPreference": MutatedPassThroughProperty(False), + "UseAliasAsEventTarget": Property(False, IS_BOOL), } Definition: Optional[Dict[str, Any]] @@ -1799,6 +1805,7 @@ class SamStateMachine(SamResourceMacro): PermissionsBoundary: Optional[Intrinsicable[str]] AutoPublishAlias: Optional[PassThrough] DeploymentPreference: Optional[PassThrough] + UseAliasAsEventTarget: Optional[bool] event_resolver = ResourceTypeResolver( samtranslator.model.stepfunctions.events, @@ -1837,6 +1844,7 @@ def to_cloudformation(self, **kwargs): # type: ignore[no-untyped-def] get_managed_policy_map=get_managed_policy_map, auto_publish_alias=self.AutoPublishAlias, deployment_preference=self.DeploymentPreference, + use_alias_as_event_target=self.UseAliasAsEventTarget, ) generated_resources = state_machine_generator.to_cloudformation() diff --git a/samtranslator/model/stepfunctions/generators.py b/samtranslator/model/stepfunctions/generators.py index f69499a7f..e5cf744be 100644 --- a/samtranslator/model/stepfunctions/generators.py +++ b/samtranslator/model/stepfunctions/generators.py @@ -54,6 +54,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 get_managed_policy_map=None, auto_publish_alias=None, deployment_preference=None, + use_alias_as_event_target=None, ): """ Constructs an State Machine Generator class that generates a State Machine resource @@ -81,6 +82,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 :param passthrough_resource_attributes: Attributes such as `Condition` that are added to derived resources :param auto_publish_alias: Name of the state machine alias to automatically create and update :deployment_preference: Settings to enable gradual state machine deployments + :param use_alias_as_event_target: Whether to use the state machine alias as the event target """ self.logical_id = logical_id 
self.depends_on = depends_on @@ -110,6 +112,7 @@ def __init__( # type: ignore[no-untyped-def] # noqa: PLR0913 self.get_managed_policy_map = get_managed_policy_map self.auto_publish_alias = auto_publish_alias self.deployment_preference = deployment_preference + self.use_alias_as_event_target = use_alias_as_event_target @cw_timer(prefix="Generator", name="StateMachine") def to_cloudformation(self): # type: ignore[no-untyped-def] @@ -300,6 +303,8 @@ def _construct_alias(self, version: StepFunctionsStateMachineVersion) -> StepFun deployment_preference["StateMachineVersionArn"] = state_machine_version_arn state_machine_alias.DeploymentPreference = deployment_preference + self.state_machine_alias = state_machine_alias + return state_machine_alias def _generate_managed_traffic_shifting_resources( @@ -310,6 +315,10 @@ def _generate_managed_traffic_shifting_resources( :returns: a list containing the state machine's version and alias resources :rtype: list """ + if not self.auto_publish_alias and self.use_alias_as_event_target: + raise InvalidResourceException( + self.logical_id, "'UseAliasAsEventTarget' requires 'AutoPublishAlias' property to be specified." + ) if not self.auto_publish_alias and not self.deployment_preference: return [] if not self.auto_publish_alias and self.deployment_preference: @@ -341,7 +350,12 @@ def _generate_event_resources(self) -> List[Dict[str, Any]]: kwargs[name] = resource except (TypeError, AttributeError) as e: raise InvalidEventException(logical_id, str(e)) from e - resources += eventsource.to_cloudformation(resource=self.state_machine, **kwargs) + target_resource = ( + (self.state_machine_alias or self.state_machine) + if self.use_alias_as_event_target + else self.state_machine + ) + resources += eventsource.to_cloudformation(resource=target_resource, **kwargs) return resources diff --git a/samtranslator/plugins/globals/globals.py b/samtranslator/plugins/globals/globals.py index d6836ff52..80960a588 100644 --- a/samtranslator/plugins/globals/globals.py +++ b/samtranslator/plugins/globals/globals.py @@ -55,6 +55,7 @@ class Globals: "RuntimeManagementConfig", "LoggingConfig", "RecursiveLoop", + "SourceKMSKeyArn", ], # Everything except # DefinitionBody: because its hard to reason about merge of Swagger dictionaries @@ -99,7 +100,7 @@ class Globals: } # unreleased_properties *must be* part of supported_properties too unreleased_properties: Dict[str, List[str]] = { - SamResourceType.Function.value: ["RuntimeManagementConfig", "RecursiveLoop"], + SamResourceType.Function.value: ["RuntimeManagementConfig", "RecursiveLoop", "SourceKMSKeyArn"], } def __init__(self, template: Dict[str, Any]) -> None: diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index f56b63d8b..7dbcaed92 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -571,7 +571,7 @@ }, "RevocationConfiguration": { "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.RevocationConfiguration", - "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). 
OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".", + "markdownDescription": "Information about the Online Certificate Status Protocol (OCSP) configuration or certificate revocation list (CRL) created and maintained by your private CA.", "title": "RevocationConfiguration" }, "SigningAlgorithm": { @@ -2573,7 +2573,7 @@ "type": "string" }, "Platform": { - "markdownDescription": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .", + "markdownDescription": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .\n\nIf you are deploying an SSG only app with Next.js version 14 or later, you must set the platform type to `WEB_COMPUTE` and set the artifacts `baseDirectory` to `.next` in the application's build settings. For an example of the build specification settings, see [Amplify build settings for a Next.js 14 SSG application](https://docs.aws.amazon.com/amplify/latest/userguide/deploy-nextjs-app.html#build-setting-detection-ssg-14) in the *Amplify Hosting User Guide* .", "title": "Platform", "type": "string" }, @@ -8149,9 +8149,13 @@ "additionalProperties": false, "properties": { "Destination": { + "markdownDescription": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Destination", "type": "string" }, "Source": { + "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Source", "type": "string" } }, @@ -9028,7 +9032,7 @@ "type": "string" }, "LocationUri": { - "markdownDescription": "A URI to locate the configuration. 
You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://my-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", + "markdownDescription": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://amzn-s3-demo-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", "title": "LocationUri", "type": "string" }, @@ -20122,7 +20126,7 @@ "type": "boolean" }, "FieldLogLevel": { - "markdownDescription": "The field logging level. Values can be NONE, ERROR, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information only for the fields that are in error:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that got resolved for each field.", + "markdownDescription": "The field logging level. 
Values can be NONE, ERROR, INFO, DEBUG, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information *only* for the fields that are in the error category:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *INFO* : Logs the following information *only* for the fields that are in the info and error categories:\n\n- Info-level messages.\n- The user messages sent through `$util.log.info` and `console.log` .\n- Field-level tracing and mapping logs are not shown.\n- *DEBUG* : Logs the following information *only* for the fields that are in the debug, info, and error categories:\n\n- Debug-level messages.\n- The user messages sent through `$util.log.info` , `$util.log.debug` , `console.log` , and `console.debug` .\n- Field-level tracing and mapping logs are not shown.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that were resolved for each field.", "title": "FieldLogLevel", "type": "string" } @@ -21200,7 +21204,7 @@ "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.ComponentMonitoringSetting" }, - "markdownDescription": "The monitoring settings of the components.", + "markdownDescription": "The monitoring settings of the components. Not required to set up default monitoring for all components. To set up default monitoring for all components, set `AutoConfigurationEnabled` to `true` .", "title": "ComponentMonitoringSettings", "type": "array" }, @@ -21331,7 +21335,7 @@ "additionalProperties": false, "properties": { "ComponentARN": { - "markdownDescription": "The ARN of the component.", + "markdownDescription": "The ARN of the component. Either the component ARN or the component name is required.", "title": "ComponentARN", "type": "string" }, @@ -21341,7 +21345,7 @@ "type": "string" }, "ComponentName": { - "markdownDescription": "The name of the component.", + "markdownDescription": "The name of the component. Either the component ARN or the component name is required.", "title": "ComponentName", "type": "string" }, @@ -22264,7 +22268,7 @@ "additionalProperties": false, "properties": { "S3AclOption": { - "markdownDescription": "The Amazon S3 canned ACL that Athena should specify when storing query results. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* .", + "markdownDescription": "The Amazon S3 canned ACL that Athena should specify when storing query results, including data files inserted by Athena as the result of statements like CTAS or INSERT INTO. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. 
For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* .", "title": "S3AclOption", "type": "string" } @@ -22333,7 +22337,7 @@ }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::Athena::WorkGroup.EncryptionConfiguration", - "markdownDescription": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "markdownDescription": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "title": "EncryptionConfiguration" }, "ExpectedBucketOwner": { @@ -22342,7 +22346,7 @@ "type": "string" }, "OutputLocation": { - "markdownDescription": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` .", + "markdownDescription": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` .", "title": "OutputLocation", "type": "string" } @@ -22368,7 +22372,7 @@ "title": "CustomerContentEncryptionConfiguration" }, "EnforceWorkGroupConfiguration": { - "markdownDescription": "If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "markdownDescription": "If set to \"true\", the settings for the workgroup override client-side settings. 
If set to \"false\", client-side settings are used. For more information, see [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "title": "EnforceWorkGroupConfiguration", "type": "boolean" }, @@ -22394,7 +22398,7 @@ }, "ResultConfiguration": { "$ref": "#/definitions/AWS::Athena::WorkGroup.ResultConfiguration", - "markdownDescription": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) .", + "markdownDescription": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) .", "title": "ResultConfiguration" } }, @@ -22751,7 +22755,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -25082,7 +25086,7 @@ "properties": { "X12Details": { "$ref": "#/definitions/AWS::B2BI::Capability.X12Details", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.", + "markdownDescription": "", "title": "X12Details" } }, @@ -25357,18 +25361,12 @@ "additionalProperties": false, "properties": { "EdiType": { - "$ref": "#/definitions/AWS::B2BI::Transformer.EdiType", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. 
X12 is a set of standards and corresponding messages that define specific business documents.", - "title": "EdiType" + "$ref": "#/definitions/AWS::B2BI::Transformer.EdiType" }, "FileFormat": { - "markdownDescription": "Returns that the currently supported file formats for EDI transformations are `JSON` and `XML` .", - "title": "FileFormat", "type": "string" }, "MappingTemplate": { - "markdownDescription": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", - "title": "MappingTemplate", "type": "string" }, "Name": { @@ -25377,8 +25375,6 @@ "type": "string" }, "SampleDocument": { - "markdownDescription": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", - "title": "SampleDocument", "type": "string" }, "Status": { @@ -25429,9 +25425,7 @@ "additionalProperties": false, "properties": { "X12Details": { - "$ref": "#/definitions/AWS::B2BI::Transformer.X12Details", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.", - "title": "X12Details" + "$ref": "#/definitions/AWS::B2BI::Transformer.X12Details" } }, "required": [ @@ -26184,7 +26178,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -27651,7 +27645,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" }, - "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one task element. However, the task element can run up to 10 containers.", "title": "TaskProperties", "type": "array" } @@ -28265,7 +28259,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", "title": "Containers", "type": "array" }, @@ -28291,7 +28285,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. 
For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", "title": "InitContainers", "type": "array" }, @@ -29237,7 +29231,7 @@ "additionalProperties": false, "properties": { "OverrideLambda": { - "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Agents for Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", + "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Amazon Bedrock Agents](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", "title": "OverrideLambda", "type": "string" }, @@ -30048,7 +30042,7 @@ }, "VectorKnowledgeBaseConfiguration": { "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration", - "markdownDescription": "Contains details about the embeddings model that'sused to convert the data source.", + "markdownDescription": "Contains details about the model that's used to convert the data source into vector embeddings.", "title": "VectorKnowledgeBaseConfiguration" } }, @@ -32664,7 +32658,7 @@ "type": "array" }, "TeamId": { - "markdownDescription": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", + "markdownDescription": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-3 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", "title": "TeamId", "type": "string" }, @@ -32773,12 +32767,12 @@ "type": "string" }, "SlackChannelId": { - "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. 
The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", + "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. For example, `ABCBBLZZZ` .", "title": "SlackChannelId", "type": "string" }, "SlackWorkspaceId": { - "markdownDescription": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in [Setting Up AWS Chatbot with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro) in the *AWS Chatbot User Guide* .", + "markdownDescription": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-3 in [Tutorial: Get started with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/slack-setup.html) in the *AWS Chatbot User Guide* .", "title": "SlackWorkspaceId", "type": "string" }, @@ -33254,7 +33248,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33818,7 +33812,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33932,7 +33926,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -34513,7 +34507,7 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "Configuration", "type": "string" }, @@ -34523,12 +34517,12 @@ "type": "string" }, "TypeArn": { - "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeArn", "type": "string" }, "TypeName": { - "markdownDescription": "The unique name for your hook. 
Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeName", "type": "string" } @@ -34938,7 +34932,7 @@ "type": "string" }, "PublicVersionNumber": { - "markdownDescription": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. AWS CloudFormation automatically sets the first version number to be `1.0.0` .", + "markdownDescription": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. CloudFormation automatically sets the first version number to be `1.0.0` .", "title": "PublicVersionNumber", "type": "string" }, @@ -35016,7 +35010,7 @@ "type": "boolean" }, "ConnectionArn": { - "markdownDescription": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *CloudFormation CLI User Guide* .", + "markdownDescription": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Prerequisite: Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .", "title": "ConnectionArn", "type": "string" } @@ -35258,7 +35252,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", + "markdownDescription": "The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", "title": "NotificationARNs", "type": "array" }, @@ -35277,7 +35271,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Key-value pairs to associate with this stack. 
AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", + "markdownDescription": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", "title": "Tags", "type": "array" }, @@ -35426,7 +35420,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.", + "markdownDescription": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.", "title": "Tags", "type": "array" }, @@ -35488,7 +35482,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36322,12 +36316,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header that CloudFront uses for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } }, @@ -36362,11 +36356,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enable session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to an associated origin or cache settings.", "title": "Weight", "type": "number" } }, @@ -36835,7 +36829,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36867,7 +36861,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, "DefaultCacheBehavior": { @@ -36925,7 +36919,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37048,22 +37042,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin. 
Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37071,7 +37065,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37087,12 +37081,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } @@ -39305,7 +39299,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. 
Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39470,7 +39464,7 @@ "items": { "$ref": "#/definitions/AWS::CloudTrail::Trail.AdvancedEventSelector" }, - "markdownDescription": "Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", + "markdownDescription": "Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events.\n\nYou can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. 
For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) and [Logging network activity events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-network-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", "title": "AdvancedEventSelectors", "type": "array" }, @@ -39628,7 +39622,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39674,7 +39668,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information about these additional resource types, see [AdvancedFieldSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) .", + "markdownDescription": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information, see [AdvancedEventSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) .", "title": "Type", "type": "string" }, @@ -39682,7 +39676,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . 
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", + "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", "title": "Values", "type": "array" } @@ -41247,12 +41241,12 @@ "type": "number" }, "ComputeType": { - "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, "EnvironmentType": { - "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -41920,7 +41914,7 @@ "properties": { "Auth": { "$ref": "#/definitions/AWS::CodeBuild::Project.SourceAuth", - "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. Your code should not get or set `Auth` directly.", + "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "title": "Auth" }, "BuildSpec": { @@ -41978,12 +41972,12 @@ "additionalProperties": false, "properties": { "Resource": { - "markdownDescription": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The resource value that applies to the specified authorization type.", "title": "Resource", "type": "string" }, "Type": { - "markdownDescription": "The authorization type to use. 
The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "Type", "type": "string" } @@ -42230,7 +42224,7 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "AuthType", "type": "string" }, @@ -42240,7 +42234,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "title": "Token", "type": "string" }, @@ -44531,7 +44525,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. 
\n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44614,7 +44608,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response.", "title": "SecretToken", "type": "string" } @@ -45694,12 +45688,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.LogConfiguration" }, - "markdownDescription": "The detailed activity logging destination of a user pool.", + "markdownDescription": "A logging destination of a user pool. 
User pools can have multiple logging destinations for message-delivery and user-activity logs.", "title": "LogConfigurations", "type": "array" }, "UserPoolId": { - "markdownDescription": "The ID of the user pool where you configured detailed activity logging.", + "markdownDescription": "The ID of the user pool where you configured logging.", "title": "UserPoolId", "type": "string" } @@ -45746,16 +45740,16 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.CloudWatchLogsConfiguration", - "markdownDescription": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", + "markdownDescription": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.\n\nThis data type is a request parameter of [SetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetLogDeliveryConfiguration.html) and a response parameter of [GetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetLogDeliveryConfiguration.html) .", "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for detailed activity logging.", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging.", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", "title": "LogLevel", "type": "string" } @@ -45804,7 +45798,7 @@ }, "AdminCreateUserConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.AdminCreateUserConfig", - "markdownDescription": "The configuration for creating a new user profile.", + "markdownDescription": "The settings for administrator creation of users in a user pool. 
Contains settings for allowing user sign-up, customizing invitation messages to new users, and the amount of time before temporary passwords expire.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "title": "AdminCreateUserConfig" }, "AliasAttributes": { @@ -45858,7 +45852,7 @@ }, "LambdaConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.LambdaConfig", - "markdownDescription": "The Lambda trigger configuration information for the new user pool.\n\n> In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you must make an extra call to add permission for these event sources to invoke your Lambda function.\n> \n> For more information on using the Lambda API to add permission, see [AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html) .\n> \n> For adding permission using the AWS CLI , see [add-permission](https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html) .", + "markdownDescription": "A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them.", "title": "LambdaConfig" }, "MfaConfiguration": { @@ -45868,7 +45862,7 @@ }, "Policies": { "$ref": "#/definitions/AWS::Cognito::UserPool.Policies", - "markdownDescription": "The policy associated with a user pool.", + "markdownDescription": "A list of user pool policies. Contains the policy that sets password-complexity requirements.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "title": "Policies" }, "Schema": { @@ -45935,7 +45929,7 @@ }, "VerificationMessageTemplate": { "$ref": "#/definitions/AWS::Cognito::UserPool.VerificationMessageTemplate", - "markdownDescription": "The template for the verification message that the user sees when the app requests permission to access the user's information.", + "markdownDescription": "The template for the verification message that your user pool delivers to users who set an email address or phone number attribute.\n\nSet the email message type that corresponds to your `DefaultEmailOption` selection. For `CONFIRM_WITH_LINK` , specify an `EmailMessageByLink` and leave `EmailMessage` blank. For `CONFIRM_WITH_CODE` , specify an `EmailMessage` and leave `EmailMessageByLink` blank. When you supply both parameters with either choice, Amazon Cognito returns an error.", "title": "VerificationMessageTemplate" } }, @@ -45979,7 +45973,7 @@ "additionalProperties": false, "properties": { "AllowAdminCreateUserOnly": { - "markdownDescription": "Set to `True` if only the administrator is allowed to create user profiles. 
Set to `False` if users can sign themselves up via an app.", + "markdownDescription": "The setting for allowing self-service sign-up. When `true` , only administrators can create new user profiles. When `false` , users can register themselves and create a new user profile with the [SignUp](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) operation.", "title": "AllowAdminCreateUserOnly", "type": "boolean" }, @@ -45989,7 +45983,7 @@ "title": "InviteMessageTemplate" }, "UnusedAccountValidityDays": { - "markdownDescription": "The user account expiration limit, in days, after which a new account that hasn't signed in is no longer usable. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `\"RESEND\"` for the `MessageAction` parameter. The default value for this parameter is 7.\n\n> If you set a value for `TemporaryPasswordValidityDays` in `PasswordPolicy` , that value will be used, and `UnusedAccountValidityDays` will be no longer be an available parameter for that user pool.", + "markdownDescription": "This parameter is no longer in use. Configure the duration of temporary passwords with the `TemporaryPasswordValidityDays` parameter of [PasswordPolicyType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_PasswordPolicyType.html) . For older user pools that have an `UnusedAccountValidityDays` configuration, that value is effective until you set a value for `TemporaryPasswordValidityDays` .\n\nThe password expiration limit in days for administrator-created users. When this time expires, the user can't sign in with their temporary password. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `RESEND` for the `MessageAction` parameter.\n\nThe default value for this parameter is 7.", "title": "UnusedAccountValidityDays", "type": "number" } @@ -46100,7 +46094,7 @@ "additionalProperties": false, "properties": { "CreateAuthChallenge": { - "markdownDescription": "Creates an authentication challenge.", + "markdownDescription": "The configuration of a create auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "CreateAuthChallenge", "type": "string" }, @@ -46110,7 +46104,7 @@ "title": "CustomEmailSender" }, "CustomMessage": { - "markdownDescription": "A custom Message AWS Lambda trigger.", + "markdownDescription": "A custom message Lambda trigger. This trigger is an opportunity to customize all SMS and email messages from your user pool. 
When a custom message trigger is active, your user pool routes all messages to a Lambda function that returns a runtime-customized message subject and body for your user pool to deliver to a user.", "title": "CustomMessage", "type": "string" }, @@ -46120,7 +46114,7 @@ "title": "CustomSMSSender" }, "DefineAuthChallenge": { - "markdownDescription": "Defines the authentication challenge.", + "markdownDescription": "The configuration of a define auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "DefineAuthChallenge", "type": "string" }, @@ -46130,42 +46124,42 @@ "type": "string" }, "PostAuthentication": { - "markdownDescription": "A post-authentication AWS Lambda trigger.", + "markdownDescription": "The configuration of a [post authentication Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-authentication.html) in a user pool. This trigger can take custom actions after a user signs in.", "title": "PostAuthentication", "type": "string" }, "PostConfirmation": { - "markdownDescription": "A post-confirmation AWS Lambda trigger.", + "markdownDescription": "The configuration of a [post confirmation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-confirmation.html) in a user pool. This trigger can take custom actions after a user confirms their user account and their email address or phone number.", "title": "PostConfirmation", "type": "string" }, "PreAuthentication": { - "markdownDescription": "A pre-authentication AWS Lambda trigger.", + "markdownDescription": "The configuration of a [pre authentication trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-authentication.html) in a user pool. This trigger can evaluate and modify user sign-in events.", "title": "PreAuthentication", "type": "string" }, "PreSignUp": { - "markdownDescription": "A pre-registration AWS Lambda trigger.", + "markdownDescription": "The configuration of a [pre sign-up Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-sign-up.html) in a user pool. This trigger evaluates new users and can bypass confirmation, [link a federated user profile](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-federation-consolidate-users.html) , or block sign-up requests.", "title": "PreSignUp", "type": "string" }, "PreTokenGeneration": { - "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .\n\nYou can set ``", + "markdownDescription": "The legacy configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . 
For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .", "title": "PreTokenGeneration", "type": "string" }, "PreTokenGenerationConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.PreTokenGenerationConfig", - "markdownDescription": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", + "markdownDescription": "The detailed configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", "title": "PreTokenGenerationConfig" }, "UserMigration": { - "markdownDescription": "The user migration Lambda config type.", + "markdownDescription": "The configuration of a [migrate user Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-migrate-user.html) in a user pool. This trigger can create user profiles when users sign in or attempt to reset their password with credentials that don't exist yet.", "title": "UserMigration", "type": "string" }, "VerifyAuthChallengeResponse": { - "markdownDescription": "Verifies the authentication challenge response.", + "markdownDescription": "The configuration of a verify auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "VerifyAuthChallengeResponse", "type": "string" } @@ -46197,22 +46191,22 @@ "type": "number" }, "RequireLowercase": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one lowercase letter in their password.", "title": "RequireLowercase", "type": "boolean" }, "RequireNumbers": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one number in their password.", "title": "RequireNumbers", "type": "boolean" }, "RequireSymbols": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one symbol in their password.", "title": "RequireSymbols", "type": "boolean" }, "RequireUppercase": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one uppercase letter in their password.", "title": "RequireUppercase", "type": "boolean" }, @@ -46229,7 +46223,7 @@ "properties": { "PasswordPolicy": { "$ref": "#/definitions/AWS::Cognito::UserPool.PasswordPolicy", - "markdownDescription": "The password policy.", + "markdownDescription": "The password policy settings for a user pool, including complexity, history, and length requirements.", "title": "PasswordPolicy" } 
}, @@ -46338,7 +46332,7 @@ "type": "string" }, "MinLength": { - "markdownDescription": "The minimum length.", + "markdownDescription": "The minimum length of a string attribute value.", "title": "MinLength", "type": "string" } @@ -46366,7 +46360,7 @@ "additionalProperties": false, "properties": { "AdvancedSecurityMode": { - "markdownDescription": "The operating mode of advanced security features in your user pool.", + "markdownDescription": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication.", "title": "AdvancedSecurityMode", "type": "string" } @@ -46377,7 +46371,7 @@ "additionalProperties": false, "properties": { "CaseSensitive": { - "markdownDescription": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **True** - Enables case sensitivity for all username input. When this option is set to `True` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **False** - Enables case insensitivity for all username input. For example, when this option is set to `False` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute.", + "markdownDescription": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **true** - Enables case sensitivity for all username input. When this option is set to `true` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **false** - Enables case insensitivity for all username input. For example, when this option is set to `false` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute.", "title": "CaseSensitive", "type": "boolean" } @@ -46388,7 +46382,7 @@ "additionalProperties": false, "properties": { "DefaultEmailOption": { - "markdownDescription": "The default email option.", + "markdownDescription": "The configuration of verification emails to contain a clickable link or a verification code.\n\nFor link, your template body must contain link text in the format `{##Click here##}` . \"Click here\" in the example is a customizable string. For code, your template body must contain a code placeholder in the format `{####}` .", "title": "DefaultEmailOption", "type": "string" }, @@ -46554,7 +46548,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of user attributes that you want your app client to have read-only access to. 
After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", + "markdownDescription": "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", "title": "ReadAttributes", "type": "array" }, @@ -46625,17 +46619,17 @@ "type": "string" }, "ApplicationId": { - "markdownDescription": "The application ID for an Amazon Pinpoint application.", + "markdownDescription": "Your Amazon Pinpoint project ID.", "title": "ApplicationId", "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID.", + "markdownDescription": "The [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) of the role that Amazon Cognito assumes to send analytics data to Amazon Pinpoint.", "title": "ExternalId", "type": "string" }, "RoleArn": { - "markdownDescription": "The ARN of an AWS Identity and Access Management role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.", + "markdownDescription": "The ARN of an AWS Identity and Access Management role that has the permissions required for Amazon Cognito to publish events to Amazon Pinpoint analytics.", "title": "RoleArn", "type": "string" }, @@ -46651,17 +46645,17 @@ "additionalProperties": false, "properties": { "AccessToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is hours. `AccessTokenValidity` duration can range from five minutes to one day.", + "markdownDescription": "A time unit for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is `hours` . 
`AccessTokenValidity` duration can range from five minutes to one day.", "title": "AccessToken", "type": "string" }, "IdToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is hours. `IdTokenValidity` duration can range from five minutes to one day.", + "markdownDescription": "A time unit for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is `hours` . `IdTokenValidity` duration can range from five minutes to one day.", "title": "IdToken", "type": "string" }, "RefreshToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is days. `RefreshTokenValidity` duration can range from 60 minutes to 10 years.", + "markdownDescription": "A time unit for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is `days` . `RefreshTokenValidity` duration can range from 60 minutes to 10 years.", "title": "RefreshToken", "type": "string" } @@ -47032,12 +47026,12 @@ "additionalProperties": false, "properties": { "ScopeDescription": { - "markdownDescription": "A description of the scope.", + "markdownDescription": "A friendly description of a custom scope.", "title": "ScopeDescription", "type": "string" }, "ScopeName": { - "markdownDescription": "The name of the scope.", + "markdownDescription": "The name of the scope. Amazon Cognito renders custom scopes in the format `resourceServerIdentifier/ScopeName` . For example, if this parameter is `exampleScope` in the resource server with the identifier `exampleResourceServer` , you request and receive the scope `exampleResourceServer/exampleScope` .", "title": "ScopeName", "type": "string" } @@ -47085,7 +47079,7 @@ "properties": { "AccountTakeoverRiskConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverRiskConfigurationType", - "markdownDescription": "The account takeover risk configuration object, including the `NotifyConfiguration` object and `Actions` to take if there is an account takeover.", + "markdownDescription": "The settings for automated responses and notification templates for adaptive authentication with advanced security features.", "title": "AccountTakeoverRiskConfiguration" }, "ClientId": { @@ -47095,16 +47089,16 @@ }, "CompromisedCredentialsRiskConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.CompromisedCredentialsRiskConfigurationType", - "markdownDescription": "The compromised credentials risk configuration object, including the `EventFilter` and the `EventAction` .", + "markdownDescription": "Settings for compromised-credentials actions and authentication types with advanced security features in full-function `ENFORCED` mode.", "title": "CompromisedCredentialsRiskConfiguration" }, "RiskExceptionConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.RiskExceptionConfigurationType", - "markdownDescription": "The configuration to override the risk decision.", + "markdownDescription": "Exceptions to the risk evaluation configuration, including always-allow and always-block IP address ranges.", "title": "RiskExceptionConfiguration" }, "UserPoolId": { - "markdownDescription": "The user pool ID.", + "markdownDescription": "The 
ID of the user pool that has the risk configuration applied.", "title": "UserPoolId", "type": "string" } @@ -47140,12 +47134,12 @@ "additionalProperties": false, "properties": { "EventAction": { - "markdownDescription": "The action to take in response to the account takeover action. Valid values are as follows:\n\n- `BLOCK` Choosing this action will block the request.\n- `MFA_IF_CONFIGURED` Present an MFA challenge if user has configured it, else allow the request.\n- `MFA_REQUIRED` Present an MFA challenge if user has configured it, else block the request.\n- `NO_ACTION` Allow the user to sign in.", + "markdownDescription": "The action to take for the attempted account takeover action for the associated risk level. Valid values are as follows:\n\n- `BLOCK` : Block the request.\n- `MFA_IF_CONFIGURED` : Present an MFA challenge if possible. MFA is possible if the user pool has active MFA methods that the user can set up. For example, if the user pool only supports SMS message MFA but the user doesn't have a phone number attribute, MFA setup isn't possible. If MFA setup isn't possible, allow the request.\n- `MFA_REQUIRED` : Present an MFA challenge if possible. Block the request if a user hasn't set up MFA. To sign in with required MFA, users must have an email address or phone number attribute, or a registered TOTP factor.\n- `NO_ACTION` : Take no action. Permit sign-in.", "title": "EventAction", "type": "string" }, "Notify": { - "markdownDescription": "Flag specifying whether to send a notification.", + "markdownDescription": "Determines whether Amazon Cognito sends a user a notification message when your user pool assesses a user's session at the associated risk level.", "title": "Notify", "type": "boolean" } @@ -47161,17 +47155,17 @@ "properties": { "HighAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a high risk.", + "markdownDescription": "The action that you assign to a high-risk assessment by advanced security features.", "title": "HighAction" }, "LowAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a low risk.", + "markdownDescription": "The action that you assign to a low-risk assessment by advanced security features.", "title": "LowAction" }, "MediumAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a medium risk.", + "markdownDescription": "The action that you assign to a medium-risk assessment by advanced security features.", "title": "MediumAction" } }, @@ -47182,12 +47176,12 @@ "properties": { "Actions": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionsType", - "markdownDescription": "Account takeover risk configuration actions.", + "markdownDescription": "A list of account-takeover actions for each level of risk that Amazon Cognito might assess with advanced security features.", "title": "Actions" }, "NotifyConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyConfigurationType", - "markdownDescription": "The notify configuration used to construct email notifications.", + "markdownDescription": "The settings for composing and sending an email message when advanced security features assess a risk level with adaptive authentication. 
When you choose to notify users in `AccountTakeoverRiskConfiguration` , Amazon Cognito sends an email message using the method and template that you set with this data type.", "title": "NotifyConfiguration" } }, @@ -47200,7 +47194,7 @@ "additionalProperties": false, "properties": { "EventAction": { - "markdownDescription": "The event action.", + "markdownDescription": "The action that Amazon Cognito takes when it detects compromised credentials.", "title": "EventAction", "type": "string" } @@ -47215,14 +47209,14 @@ "properties": { "Actions": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.CompromisedCredentialsActionsType", - "markdownDescription": "The compromised credentials risk configuration actions.", + "markdownDescription": "Settings for the actions that you want your user pool to take when Amazon Cognito detects compromised credentials.", "title": "Actions" }, "EventFilter": { "items": { "type": "string" }, - "markdownDescription": "Perform the action for these events. The default is to perform all events if no event filter is specified.", + "markdownDescription": "Settings for the sign-in activity where you want to configure compromised-credentials actions. Defaults to all events.", "title": "EventFilter", "type": "array" } @@ -47237,26 +47231,26 @@ "properties": { "BlockEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "Email template used when a detected risk event is blocked.", + "markdownDescription": "The template for the email message that your user pool sends when a detected risk event is blocked.", "title": "BlockEmail" }, "From": { - "markdownDescription": "The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", + "markdownDescription": "The email address that sends the email message. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", "title": "From", "type": "string" }, "MfaEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk.", + "markdownDescription": "The template for the email message that your user pool sends when MFA is challenged in response to a detected risk.", "title": "MfaEmail" }, "NoActionEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "The email template used when a detected risk event is allowed.", + "markdownDescription": "The template for the email message that your user pool sends when no action is taken in response to a detected risk.", "title": "NoActionEmail" }, "ReplyTo": { - "markdownDescription": "The destination to which the receiver of an email should reply to.", + "markdownDescription": "The reply-to email address of an email template.", "title": "ReplyTo", "type": "string" }, @@ -47275,17 +47269,17 @@ "additionalProperties": false, "properties": { "HtmlBody": { - "markdownDescription": "The email HTML body.", + "markdownDescription": "The body of an email notification formatted in HTML. 
Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively.", "title": "HtmlBody", "type": "string" }, "Subject": { - "markdownDescription": "The email subject.", + "markdownDescription": "The subject of the threat protection email notification.", "title": "Subject", "type": "string" }, "TextBody": { - "markdownDescription": "The email text body.", + "markdownDescription": "The body of an email notification formatted in plaintext. Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively.", "title": "TextBody", "type": "string" } @@ -47302,7 +47296,7 @@ "items": { "type": "string" }, - "markdownDescription": "Overrides the risk decision to always block the pre-authentication requests. The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.", + "markdownDescription": "An always-block IP address list. Overrides the risk decision and always blocks authentication requests. This parameter is displayed and set in CIDR notation.", "title": "BlockedIPRangeList", "type": "array" }, @@ -47310,7 +47304,7 @@ "items": { "type": "string" }, - "markdownDescription": "Risk detection isn't performed on the IP addresses in this range list. The IP range is in CIDR notation.", + "markdownDescription": "An always-allow IP address list. Risk detection isn't performed on the IP addresses in this range list. This parameter is displayed and set in CIDR notation.", "title": "SkippedIPRangeList", "type": "array" } @@ -61729,32 +61723,32 @@ "additionalProperties": false, "properties": { "AccessPointArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", "title": "AccessPointArn", "type": "string" }, "Ec2Config": { "$ref": "#/definitions/AWS::DataSync::LocationEFS.Ec2Config", - "markdownDescription": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", + "markdownDescription": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", "title": "Ec2Config" }, "EfsFilesystemArn": { - "markdownDescription": "Specifies the ARN for the Amazon EFS file system.", + "markdownDescription": "Specifies the ARN for your Amazon EFS file system.", "title": "EfsFilesystemArn", "type": "string" }, "FileSystemAccessRoleArn": { - "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", + "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", "title": "FileSystemAccessRoleArn", "type": "string" }, "InTransitEncryption": { - "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data 
to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", "title": "InTransitEncryption", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -62354,7 +62348,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "title": "AgentArns", "type": "array" }, @@ -62531,7 +62525,7 @@ }, "OnPremConfig": { "$ref": "#/definitions/AWS::DataSync::LocationNFS.OnPremConfig", - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "OnPremConfig" }, "ServerHostname": { @@ -62597,7 +62591,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. 
For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "AgentArns", "type": "array" } @@ -62651,7 +62645,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "title": "AgentArns", "type": "array" }, @@ -63497,7 +63491,7 @@ "additionalProperties": false, "properties": { "ScheduleExpression": { - "markdownDescription": "Specifies your task schedule by using a cron expression in UTC time. For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "markdownDescription": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "title": "ScheduleExpression", "type": "string" }, @@ -64650,7 +64644,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64732,7 +64726,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65379,7 +65373,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65728,7 +65722,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. 
Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -66621,7 +66615,7 @@ "type": "number" }, "CopyTagsToSnapshot": { - "markdownDescription": "", + "markdownDescription": "Set to `true` to copy all tags from the source cluster snapshot to the target cluster snapshot, and otherwise `false` . The default is `false` .", "title": "CopyTagsToSnapshot", "type": "boolean" }, @@ -66654,7 +66648,7 @@ "type": "array" }, "EngineVersion": { - "markdownDescription": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.", + "markdownDescription": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.\n\nChanging the `EngineVersion` will start an in-place engine version upgrade. Note that in-place engine version upgrade will cause downtime in the cluster. See [Amazon DocumentDB in-place major version upgrade](https://docs.aws.amazon.com/documentdb/latest/developerguide/docdb-mvu.html) before starting an in-place engine version upgrade.", "title": "EngineVersion", "type": "string" }, @@ -66709,7 +66703,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "Specifies whether the cluster is encrypted.", + "markdownDescription": "Specifies whether the cluster is encrypted.\n\nIf you specify `SourceDBClusterIdentifier` or `SnapshotIdentifier` and don\u2019t specify `StorageEncrypted` , the encryption property is inherited from the source cluster or snapshot (unless `KMSKeyId` is specified, in which case the restored cluster will be encrypted with that KMS key). If the source is encrypted and `StorageEncrypted` is specified to be true, the restored cluster will be encrypted (if you want to use a different KMS key, specify the `KMSKeyId` property as well). If the source is unencrypted and `StorageEncrypted` is specified to be true, then the `KMSKeyId` property must be specified. If the source is encrypted, don\u2019t specify `StorageEncrypted` to be false as opting out of encryption is not allowed.", "title": "StorageEncrypted", "type": "boolean" }, @@ -66900,7 +66894,7 @@ "type": "string" }, "CACertificateIdentifier": { - "markdownDescription": "The CA certificate identifier to use for the DB instance's server certificate.\n\nFor more information, see [Updating Your Amazon DocumentDB TLS Certificates](https://docs.aws.amazon.com/documentdb/latest/developerguide/ca_cert_rotation.html) and [Encrypting Data in Transit](https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.html) in the *Amazon DocumentDB Developer Guide* .", + "markdownDescription": "The identifier of the CA certificate for this DB instance.", "title": "CACertificateIdentifier", "type": "string" }, @@ -69829,7 +69823,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). 
However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71198,7 +71192,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -72628,7 +72622,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateTagSpecification" }, - "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for resources that are created during instance launch, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -73217,7 +73211,7 @@ "title": "IamInstanceProfile" }, "ImageId": { - "markdownDescription": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "title": "ImageId", "type": "string" }, @@ -73317,7 +73311,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.TagSpecification" }, - "markdownDescription": "The tags to apply to the resources that are created during instance launch.\n\nTo tag a resource after it has been created, see [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) .\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to resources that are created during instance launch.\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -74177,7 +74171,7 @@ "items": { "type": "string" }, - "markdownDescription": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating) in the *Amazon VPC User Guide* .", + "markdownDescription": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-working-with.html) in the *Amazon VPC User Guide* .", "title": "SecondaryAllocationIds", "type": "array" }, @@ -75055,7 +75049,7 @@ "type": "string" }, "destinationPrefixListId": { - "markdownDescription": "The prefix of the AWS service .", + "markdownDescription": "The prefix of the AWS service.", "title": "destinationPrefixListId", "type": "string" }, @@ -76350,12 +76344,12 @@ "items": { "$ref": "#/definitions/AWS::EC2::PrefixList.Entry" }, - "markdownDescription": "One or more entries for the prefix list.", + "markdownDescription": "The entries for the prefix list.", "title": "Entries", "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. 
This property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -77684,7 +77678,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77907,7 +77901,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78232,7 +78226,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). 
For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -78260,8 +78254,6 @@ "items": { "type": "string" }, - "markdownDescription": "The IPv6 network ranges for the subnet, in CIDR notation.", - "title": "Ipv6CidrBlocks", "type": "array" }, "Ipv6IpamPoolId": { @@ -80524,7 +80516,7 @@ "additionalProperties": false, "properties": { "PolicyDocument": { - "markdownDescription": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation converts YAML policies to JSON format before calling the API to create or modify the VPC endpoint.", + "markdownDescription": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. For example, if you have a JSON policy, you can convert it to YAML before including it in the YAML template, and AWS CloudFormation converts the policy to JSON format before calling the API actions for AWS PrivateLink . Alternatively, you can include the JSON directly in the YAML, as shown in the following `Properties` section:\n\n`Properties: VpcEndpointType: 'Interface' ServiceName: !Sub 'com.amazonaws.${AWS::Region}.logs' PolicyDocument: '{ \"Version\":\"2012-10-17\", \"Statement\": [{ \"Effect\":\"Allow\", \"Principal\":\"*\", \"Action\":[\"logs:Describe*\",\"logs:Get*\",\"logs:List*\",\"logs:FilterLogEvents\"], \"Resource\":\"*\" }] }'`", "title": "PolicyDocument", "type": "object" }, @@ -82813,7 +82805,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82894,37 +82886,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable strings representing the repository creation scenarios that this template will apply to. The two supported scenarios are `PULL_THROUGH_CACHE` and `REPLICATION` .", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used, which will allow image tags to be overwritten. 
If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "he repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82932,7 +82924,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } @@ -82968,7 +82960,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . 
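Put together, the `AWS::ECR::RepositoryCreationTemplate` descriptions filled in above compose roughly as follows; this is a sketch only, and the prefix, description, and chosen values are illustrative assumptions:

```yaml
Resources:
  CacheRepoTemplate:                   # assumed logical ID
    Type: AWS::ECR::RepositoryCreationTemplate
    Properties:
      Prefix: ecr-public               # assumed repository namespace prefix
      AppliedFor:
        - PULL_THROUGH_CACHE           # the other supported scenario is REPLICATION
      Description: Applied to repositories created through pull through cache
      ImageTagMutability: IMMUTABLE    # pushed tags can no longer be overwritten
      EncryptionConfiguration:
        EncryptionType: AES256         # S3-managed server-side encryption
```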
Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "title": "EncryptionType", "type": "string" }, @@ -83101,7 +83093,7 @@ "type": "number" }, "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", + "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "title": "MaximumScalingStepSize", "type": "number" }, @@ -83733,7 +83725,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -83741,7 +83733,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -83832,12 +83824,12 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). 
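The `awsvpcConfiguration` limits called out above (at most 5 security groups and 16 subnets, all from the same VPC) land in a service's network configuration; a minimal sketch with placeholder IDs:

```yaml
NetworkConfiguration:
  AwsvpcConfiguration:
    AssignPublicIp: DISABLED
    SecurityGroups:                    # limit of 5, same VPC
      - sg-0123456789abcdef0
    Subnets:                           # limit of 16, same VPC
      - subnet-0123456789abcdef0
      - subnet-0fedcba9876543210
```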
The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", "title": "MaximumPercent", "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. 
A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. 
The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -83918,7 +83910,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
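To make the percentages above concrete: with a `desiredCount` of 4, `MaximumPercent: 200` caps a rolling deployment at 4 * 200/100 = 8 `RUNNING` or `PENDING` tasks, and `MinimumHealthyPercent: 50` keeps at least ceil(4 * 50/100) = 2 tasks `RUNNING` . A minimal sketch, with the cluster and task definition references as placeholders:

```yaml
Resources:
  WebService:                          # assumed logical ID
    Type: AWS::ECS::Service
    Properties:
      Cluster: !Ref EcsCluster         # placeholder reference
      TaskDefinition: !Ref WebTaskDef  # placeholder reference
      DesiredCount: 4
      DeploymentConfiguration:
        MaximumPercent: 200            # upper bound: 4 * 200/100 = 8 tasks
        MinimumHealthyPercent: 50      # lower bound: ceil(4 * 50/100) = 2 tasks
```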
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type, required for the Fargate launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. 
The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84036,7 +84028,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::Service.LogConfiguration", - "markdownDescription": "The log configuration for the container. 
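The `awslogs` options enumerated above map onto a container definition's log configuration roughly as in this sketch; the log group, Region, and prefix are assumptions, and `non-blocking` mode is paired with an explicit `max-buffer-size` as the description advises:

```yaml
LogConfiguration:
  LogDriver: awslogs
  Options:
    awslogs-group: /ecs/web            # assumed log group name
    awslogs-create-group: "true"       # needs the logs:CreateLogGroup permission
    awslogs-region: us-east-1          # assumed Region
    awslogs-stream-prefix: web         # required for the Fargate launch type
    mode: non-blocking                 # favors availability; logs may be dropped
    max-buffer-size: 25m               # in-memory buffer used by non-blocking mode
```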
This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "markdownDescription": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. 
Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", "title": "LogConfiguration" }, "Namespace": { @@ -84330,7 +84322,7 @@ "type": "array" }, "IpcMode": { - "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. 
If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "IpcMode", "type": "string" }, @@ -84340,12 +84332,12 @@ "type": "string" }, "NetworkMode": { - "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", + "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . 
If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", "title": "NetworkMode", "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . 
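The `IpcMode` , `NetworkMode` , and `PidMode` settings described above all sit at the top level of a task definition. A minimal sketch on the EC2 launch type, since `IpcMode` is not supported on Fargate; the family name and image are illustrative:

```yaml
Resources:
  AppTaskDefinition:                   # assumed logical ID
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: app-task                 # assumed family name
      RequiresCompatibilities:
        - EC2                          # IpcMode is unsupported on Fargate and Windows
      NetworkMode: awsvpc              # task receives its own ENI; no dynamic host ports
      IpcMode: task                    # containers in the task share IPC resources
      PidMode: task                    # containers in the task share one process namespace
      ContainerDefinitions:
        - Name: app
          Image: public.ecr.aws/docker/library/nginx:latest  # illustrative image
          Memory: 512
```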
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84384,7 +84376,7 @@ "type": "array" }, "TaskRoleArn": { - "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.", "title": "TaskRoleArn", "type": "string" }, @@ -84442,12 +84434,12 @@ "items": { "type": "string" }, - "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.", + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. 
If there are multiple arguments, each argument is a separate string in the array.", "title": "Command", "type": "array" }, "Cpu": { - "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "markdownDescription": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "title": "Cpu", "type": "number" }, @@ -84468,7 +84460,7 @@ "type": "array" }, "DisableNetworking": { - "markdownDescription": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, networking is off within the container. 
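As a worked instance of the CPU-share arithmetic above: on a single-core instance (1,024 units), a container reserving 512 units is guaranteed half the core when a second such task competes for it, and may burst to the full core while the other is idle. In a container definition that reservation is simply:

```yaml
ContainerDefinitions:
  - Name: worker                       # assumed container name
    Image: public.ecr.aws/docker/library/busybox:latest  # illustrative image
    Cpu: 512                           # guaranteed 512 of 1,024 per-core units under contention
    Memory: 256
```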
This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.", "title": "DisableNetworking", "type": "boolean" }, @@ -84476,7 +84468,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsSearchDomains", "type": "array" }, @@ -84484,13 +84476,13 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsServers", "type": "array" }, "DockerLabels": { "additionalProperties": true, - "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84503,7 +84495,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", + "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", "title": "DockerSecurityOptions", "type": "array" }, @@ -84511,7 +84503,7 @@ "items": { "type": "string" }, - "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .", + "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.", "title": "EntryPoint", "type": "array" }, @@ -84519,7 +84511,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.KeyValuePair" }, - "markdownDescription": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", + "markdownDescription": "The environment variables to pass to a container. 
This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", "title": "Environment", "type": "array" }, @@ -84527,7 +84519,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.EnvironmentFile" }, - "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "EnvironmentFiles", "type": "array" }, @@ -84540,7 +84532,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostEntry" }, - "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", + "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. 
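The precedence rule above (inline `Environment` entries win over variables sourced from `EnvironmentFiles` , which are processed top down) looks like the following in a container definition; the bucket ARN is a placeholder, and the object must carry a `.env` extension:

```yaml
Environment:
  - Name: LOG_LEVEL
    Value: info                                  # overrides LOG_LEVEL from any env file
EnvironmentFiles:
  - Type: s3
    Value: arn:aws:s3:::example-config/app.env   # placeholder ARN; files processed top down
```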
This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", "title": "ExtraHosts", "type": "array" }, @@ -84551,21 +84543,21 @@ }, "HealthCheck": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HealthCheck", - "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.", "title": "HealthCheck" }, "Hostname": { - "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", + "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", "title": "Hostname", "type": "string" }, "Image": { - "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . 
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", "title": "Image", "type": "string" }, "Interactive": { - "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.", "title": "Interactive", "type": "boolean" }, @@ -84573,7 +84565,7 @@ "items": { "type": "string" }, - "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. 
This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "title": "Links", "type": "array" }, @@ -84584,7 +84576,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance.
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { @@ -84593,7 +84585,7 @@ "type": "number" }, "MemoryReservation": { - "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first.
This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. 
So, don't specify less than 4 MiB of memory for your containers.", "title": "MemoryReservation", "type": "number" }, @@ -84601,12 +84593,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.MountPoint" }, - "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount points can't be across drives.", "title": "MountPoints", "type": "array" }, "Name": { - "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the docker container create command and the `--name` option to docker run.", "title": "Name", "type": "string" }, @@ -84619,17 +84611,17 @@ "type": "array" }, "Privileged": { - "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user).
This parameter maps to `Privileged` in the docker container create command and the `--privileged` option to docker run.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "Privileged", "type": "boolean" }, "PseudoTerminal": { - "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the docker container create command and the `--tty` option to docker run.", "title": "PseudoTerminal", "type": "boolean" }, "ReadonlyRootFilesystem": { - "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker container create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "ReadonlyRootFilesystem", "type": "boolean" }, @@ -84660,7 +84652,7 @@ "type": "number" }, "StopTimeout": { - "markdownDescription": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nThe max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter or the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package.
If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", + "markdownDescription": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks that use the Fargate launch type, the max stop timeout value is 120 seconds, and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter nor the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable is set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values for Fargate are 2-120 seconds.", "title": "StopTimeout", "type": "number" }, @@ -84668,7 +84660,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure the `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -84681,7 +84673,7 @@ "type": "array" }, "User": { - "markdownDescription": "The user to use inside the container.
This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", "title": "User", "type": "string" }, @@ -84689,12 +84681,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.VolumeFrom" }, - "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.", "title": "VolumesFrom", "type": "array" }, "WorkingDirectory": { - "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The working directory in which to run commands inside the container. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run.", "title": "WorkingDirectory", "type": "string" } @@ -84754,13 +84746,13 @@ "type": "boolean" }, "Driver": { - "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name.
For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.", "title": "Driver", "type": "string" }, "DriverOpts": { "additionalProperties": true, - "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84771,7 +84763,7 @@ }, "Labels": { "additionalProperties": true, - "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84853,12 +84845,12 @@ "additionalProperties": false, "properties": { "CredentialsParameter": { - "markdownDescription": "", + "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter.
The ARN refers to the stored credentials.", "title": "CredentialsParameter", "type": "string" }, "Domain": { - "markdownDescription": "", + "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.", "title": "Domain", "type": "string" } @@ -84923,7 +84915,7 @@ "items": { "type": "string" }, - "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .", + "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "title": "Command", "type": "array" }, @@ -85000,7 +84992,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability.
\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", "title": "Add", "type": "array" }, @@ -85008,7 +85000,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. 
This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", "title": "Drop", "type": "array" } @@ -85043,27 +85035,27 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.Device" }, - "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", + "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", "title": "Devices", "type": "array" }, "InitProcessEnabled": { - "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "title": "InitProcessEnabled", "type": "boolean" }, "MaxSwap": { - "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. 
A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", "title": "MaxSwap", "type": "number" }, "SharedMemorySize": { - "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", + "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", "title": "SharedMemorySize", "type": "number" }, "Swappiness": { - "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. 
This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", "title": "Swappiness", "type": "number" }, @@ -85071,7 +85063,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.Tmpfs" }, - "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.", + "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.", "title": "Tmpfs", "type": "array" } @@ -85088,7 +85080,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance.
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss.
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory before being sent to the log router container. It can help to resolve a potential log loss issue, because high throughput might result in the buffer inside of Docker running out of memory.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -85328,7 +85320,7 @@ "additionalProperties": false, "properties": { "HardLimit": { - "markdownDescription": "The hard limit for the `ulimit` type.", + "markdownDescription": "The hard limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` .", "title": "HardLimit", "type": "number" }, @@ -85338,7 +85330,7 @@ "type": "string" }, "SoftLimit": { - "markdownDescription": "The soft limit for the `ulimit` type.", + "markdownDescription": "The soft limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` .", "title": "SoftLimit", "type": "number" } @@ -85542,7 +85534,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service.
If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -85550,7 +85542,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -87909,12 +87901,12 @@ "properties": { "OnDemandSpecification": { "$ref": "#/definitions/AWS::EMR::Cluster.OnDemandProvisioningSpecification", - "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", "title": "OnDemandSpecification" }, "SpotSpecification": { "$ref": "#/definitions/AWS::EMR::Cluster.SpotProvisioningSpecification", - "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.", + "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior.", "title": "SpotSpecification" } }, @@ -88659,12 +88651,12 @@ "properties": { "OnDemandSpecification": { "$ref": "#/definitions/AWS::EMR::InstanceFleetConfig.OnDemandProvisioningSpecification", - "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. 
On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", "title": "OnDemandSpecification" }, "SpotSpecification": { "$ref": "#/definitions/AWS::EMR::InstanceFleetConfig.SpotProvisioningSpecification", - "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.", + "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior.", "title": "SpotSpecification" } }, @@ -90375,12 +90367,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90418,7 +90410,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90431,7 +90423,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90472,12 +90464,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90653,7 +90645,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90668,7 +90660,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The Elasticache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90779,7 +90771,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90922,22 +90914,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`", + "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. 
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90947,7 +90939,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90965,7 +90957,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90990,7 +90982,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -91013,7 +91005,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -91021,7 +91013,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91036,7 +91028,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91090,7 +91082,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91110,7 +91102,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91123,12 +91115,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
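As a worked example of the migration path just described, a hedged sketch (logical names, node type, and engine version are illustrative; any Redis OSS `3.2.6` / `4.x`-or-later version in an Amazon VPC qualifies):

```yaml
# Hypothetical sketch only -- names and sizes are placeholders.
ReplicationGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  Properties:
    ReplicationGroupDescription: TLS migration in progress
    Engine: redis
    EngineVersion: "7.1"
    CacheNodeType: cache.t4g.micro
    NumCacheClusters: 2
    CacheSubnetGroupName: !Ref CacheSubnetGroup  # assumed to exist elsewhere
    TransitEncryptionEnabled: true
    TransitEncryptionMode: preferred  # flip to "required" in a later update
```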
Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91247,7 +91239,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91471,7 +91463,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91532,7 +91524,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91553,7 +91545,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -94290,7 +94282,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::TargetGroup.TargetGroupAttribute" }, - "markdownDescription": "The attributes.", + "markdownDescription": "The target group attributes.", "title": "TargetGroupAttributes", "type": "array" }, @@ -95084,7 +95076,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95171,7 +95163,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95181,7 +95173,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95329,7 +95321,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95384,7 +95376,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95703,7 +95695,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95924,7 +95916,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . 
By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -99732,7 +99724,7 @@ "type": "boolean" }, "DataRepositoryPath": { - "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "title": "DataRepositoryPath", "type": "string" }, @@ -103381,7 +103373,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103627,7 +103619,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . 
For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104220,7 +104212,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -105396,7 +105388,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. 
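To make the `JDBC` parameter combinations above concrete, a hypothetical `AWS::Glue::Connection` sketch using `JDBC_CONNECTION_URL` plus `SECRET_ID` (the endpoint, connection name, and secret name are placeholders):

```yaml
# Hypothetical sketch only -- the URL and secret name are placeholders.
JdbcConnection:
  Type: AWS::Glue::Connection
  Properties:
    CatalogId: !Ref AWS::AccountId
    ConnectionInput:
      Name: my-jdbc-connection
      ConnectionType: JDBC
      ConnectionProperties:
        JDBC_CONNECTION_URL: jdbc:postgresql://db.example.internal:5432/app
        JDBC_ENFORCE_SSL: "true"
        SECRET_ID: my-db-secret   # stands in for USERNAME/PASSWORD
```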
Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . 
These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "title": "ConnectionType", "type": "string" }, @@ -106999,7 +106991,7 @@ "title": "FindMatchesParameters" }, "TransformType": { - "markdownDescription": "The type of machine learning transform. `FIND_MATCHES` is the only option.\n\nFor information about the types of machine learning transforms, see [Creating Machine Learning Transforms](https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html) .", + "markdownDescription": "The type of machine learning transform. 
`FIND_MATCHES` is the only option.\n\nFor information about the types of machine learning transforms, see [Working with machine learning transforms](https://docs.aws.amazon.com/glue/latest/dg/console-machine-learning-transforms.html) .", "title": "TransformType", "type": "string" } @@ -112680,27 +112672,27 @@ "additionalProperties": false, "properties": { "AgentStatus": { - "markdownDescription": "", + "markdownDescription": "The status of AgentEndpoint.", "title": "AgentStatus", "type": "string" }, "AuditResults": { - "markdownDescription": "", + "markdownDescription": "The results of the audit.", "title": "AuditResults", "type": "string" }, "EgressAddress": { "$ref": "#/definitions/AWS::GroundStation::DataflowEndpointGroup.ConnectionDetails", - "markdownDescription": "", + "markdownDescription": "The egress address of AgentEndpoint.", "title": "EgressAddress" }, "IngressAddress": { "$ref": "#/definitions/AWS::GroundStation::DataflowEndpointGroup.RangedConnectionDetails", - "markdownDescription": "", + "markdownDescription": "The ingress address of AgentEndpoint.", "title": "IngressAddress" }, "Name": { - "markdownDescription": "", + "markdownDescription": "Name string associated with AgentEndpoint. Used as a human-readable identifier for AgentEndpoint.", "title": "Name", "type": "string" } @@ -112992,12 +112984,12 @@ "additionalProperties": false, "properties": { "KmsAliasArn": { - "markdownDescription": "", + "markdownDescription": "KMS Alias Arn.", "title": "KmsAliasArn", "type": "string" }, "KmsKeyArn": { - "markdownDescription": "", + "markdownDescription": "KMS Key Arn.", "title": "KmsKeyArn", "type": "string" } @@ -113292,7 +113284,7 @@ "type": "string" }, "DetectorId": { - "markdownDescription": "The ID of the detector belonging to the GuardDuty account that you want to create a filter for.", + "markdownDescription": "The detector ID associated with the GuardDuty account for which you want to create a filter.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113502,7 +113494,7 @@ "type": "boolean" }, "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty account that you want to create an IPSet for.", + "markdownDescription": "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113613,7 +113605,7 @@ "additionalProperties": false, "properties": { "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty member account.", + "markdownDescription": "The unique ID of the detector of the GuardDuty member account.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113788,7 +113780,7 @@ "type": "boolean" }, "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty account that you want to create a threatIntelSet for.", + "markdownDescription": "The unique ID of the detector of 
the GuardDuty account for which you want to create a `ThreatIntelSet` .\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -114633,7 +114625,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -116693,7 +116685,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.", + "markdownDescription": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.\n\nPrefix search supports a maximum of 1,000 characters for the string.", "title": "DisplayName", "type": "string" }, @@ -117721,7 +117713,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -117962,7 +117954,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. 
Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -129708,7 +129700,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -129860,7 +129852,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property.", + "markdownDescription": "The data type of the asset model property.\n\nIf you specify `STRUCT` , you must also specify `dataTypeSpec` to identify the type of the structure for this property.", "title": "DataType", "type": "string" }, @@ -130243,7 +130235,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -130322,7 +130314,7 @@ }, "SiemensIE": { "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.SiemensIE", - "markdownDescription": "", + "markdownDescription": "An AWS IoT SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.", "title": "SiemensIE" } }, @@ -130360,7 +130352,7 @@ "additionalProperties": false, "properties": { "IotCoreThingName": { - "markdownDescription": "", + "markdownDescription": "The name of the AWS IoT Thing for your AWS IoT SiteWise Edge gateway.", "title": "IotCoreThingName", "type": "string" } @@ -131672,7 +131664,7 @@ "type": "object" }, "WorkspaceId": { - "markdownDescription": "", + "markdownDescription": "The ID of the workspace.", "title": "WorkspaceId", "type": "string" } @@ -139209,12 +139201,12 @@ "title": "DeliveryStreamEncryptionConfigurationInput" }, "DeliveryStreamName": { - "markdownDescription": "The name of the delivery stream.", + "markdownDescription": "The name of the Firehose stream.", "title": "DeliveryStreamName", "type": "string" }, "DeliveryStreamType": { - "markdownDescription": "The delivery stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the delivery stream directly.\n- `KinesisStreamAsSource` : The delivery stream uses a Kinesis data stream as a source.", + "markdownDescription": "The Firehose stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the Firehose stream directly.\n- `KinesisStreamAsSource` : The Firehose stream uses a Kinesis data stream as a source.", "title": "DeliveryStreamType", "type": "string" }, @@ -139267,7 +139259,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. 
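Since tagged `CreateDeliveryStream` calls trigger the extra `firehose:TagDeliveryStream` authorization described here, a caller's policy needs both actions. A hedged sketch (the resource ARN pattern is illustrative; scope it to your own streams):

```yaml
# Hypothetical sketch only -- narrow the Resource for real use.
FirehoseCreatePolicy:
  Type: AWS::IAM::ManagedPolicy
  Properties:
    PolicyDocument:
      Version: "2012-10-17"
      Statement:
        - Effect: Allow
          Action:
            - firehose:CreateDeliveryStream
            - firehose:TagDeliveryStream
          Resource: !Sub arn:aws:firehose:${AWS::Region}:${AWS::AccountId}:deliverystream/*
```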
If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", + "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as the following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -139303,7 +139295,7 @@ "type": "number" }, "SizeInMBs": { - "markdownDescription": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.", + "markdownDescription": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.", "title": "SizeInMBs", "type": "number" } @@ -139801,7 +139793,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The Amazon CloudWatch logging options for your delivery stream.", + "markdownDescription": "The Amazon CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "CompressionFormat": { @@ -139860,7 +139852,7 @@ "title": "S3BackupConfiguration" }, "S3BackupMode": { - "markdownDescription": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled.
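
The `Tags` description above calls out the extra `firehose:TagDeliveryStream` authorization performed when a stream is created with tags. A minimal sketch of an IAM policy granting it, in CloudFormation YAML with hypothetical logical names:

```yaml
FirehoseTaggingPolicy:
  Type: AWS::IAM::ManagedPolicy
  Properties:
    PolicyDocument:
      Version: "2012-10-17"
      Statement:
        - Effect: Allow
          # Checked when CreateDeliveryStream is called with tags attached
          Action: firehose:TagDeliveryStream
          Resource: !Sub arn:aws:firehose:${AWS::Region}:${AWS::AccountId}:deliverystream/*
```
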
If backup is enabled, you can't update the delivery stream to disable it.", + "markdownDescription": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.", "title": "S3BackupMode", "type": "string" } @@ -140273,7 +140265,7 @@ "properties": { "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The CloudWatch logging options for your delivery stream.", + "markdownDescription": "The CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "ClusterJDBCURL": { @@ -140312,7 +140304,7 @@ "title": "S3BackupConfiguration" }, "S3BackupMode": { - "markdownDescription": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.", + "markdownDescription": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.", "title": "S3BackupMode", "type": "string" }, @@ -140374,7 +140366,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The CloudWatch logging options for your delivery stream.", + "markdownDescription": "The CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "CompressionFormat": { @@ -140506,7 +140498,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -140634,7 +140626,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The Amazon CloudWatch logging options for your delivery stream.", + "markdownDescription": "The Amazon CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "HECAcknowledgmentTimeoutInSeconds": { @@ -142681,7 +142673,7 @@ "items": { "type": "string" }, - "markdownDescription": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "markdownDescription": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "title": "FunctionResponseTypes", "type": "array" }, @@ -142980,7 +142972,7 @@ }, "Code": { "$ref": "#/definitions/AWS::Lambda::Function.Code", - "markdownDescription": "The code for the function.", + "markdownDescription": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. 
This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "title": "Code" }, "CodeSigningConfigArn": { @@ -143032,7 +143024,7 @@ "title": "ImageConfig" }, "KmsKeyArn": { - "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR).\nIf you don't provide a customer managed key, Lambda uses a default service key.", + "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ). If you don't provide a customer managed key, Lambda uses a default service key.", "title": "KmsKeyArn", "type": "string" }, @@ -143088,7 +143080,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.", + "markdownDescription": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.\n\n> You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", "title": "Tags", "type": "array" }, @@ -143631,7 +143623,7 @@ "type": "string" }, "Principal": { - "markdownDescription": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "markdownDescription": "The AWS service , AWS account , IAM user, or IAM role that invokes the function.
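
The expanded `Code` description above enumerates the packaging options. A minimal sketch of the inline `ZipFile` variant (which the description limits to Node.js and Python); the execution role is a hypothetical resource assumed to exist elsewhere in the template:

```yaml
InlineFunction:
  Type: AWS::Lambda::Function
  Properties:
    Runtime: python3.12
    Handler: index.handler
    Role: !GetAtt FunctionRole.Arn  # hypothetical execution role
    Code:
      ZipFile: |
        def handler(event, context):
            # trivial handler to keep the sketch self-contained
            return {"statusCode": 200}
```
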
If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "title": "Principal", "type": "string" }, @@ -143735,7 +143727,7 @@ "type": "string" }, "TargetFunctionArn": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* - `123456789012:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `lambda: : :function:my-function` .\n- *Partial ARN* - `:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "TargetFunctionArn", "type": "string" } @@ -144253,7 +144245,7 @@ }, "VoiceSettings": { "$ref": "#/definitions/AWS::Lex::Bot.VoiceSettings", - "markdownDescription": "Defines settings for using an Amazon Polly voice to communicate with a user.", + "markdownDescription": "Defines settings for using an Amazon Polly voice to communicate with a user.\n\nValid values include:\n\n- `standard`\n- `neural`\n- `long-form`\n- `generative`", "title": "VoiceSettings" } }, @@ -150454,7 +150446,7 @@ "type": "array" }, "Name": { - "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", + "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `*folder-name* / *query-name*` .", "title": "Name", "type": "string" }, @@ -152046,67 +152038,67 @@ "properties": { "BrokerNodeGroupInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.BrokerNodeGroupInfo", - "markdownDescription": "Information about the broker nodes in the cluster.", + "markdownDescription": "", "title": "BrokerNodeGroupInfo" }, "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::Cluster.ClientAuthentication", - "markdownDescription": "Includes all client authentication related information.", + "markdownDescription": "", "title": "ClientAuthentication" }, "ClusterName": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "", "title": "ClusterName", "type": "string" }, "ConfigurationInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.ConfigurationInfo", - "markdownDescription": "Represents the configuration that you want MSK to use for the cluster.", + "markdownDescription": "", "title": "ConfigurationInfo" }, "CurrentVersion": { - "markdownDescription": "The version of the cluster that you want to update.", + "markdownDescription": "", "title": "CurrentVersion", "type": "string" }, "EncryptionInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionInfo", - "markdownDescription": "Includes all encryption-related information.", + "markdownDescription": "", "title": "EncryptionInfo" }, "EnhancedMonitoring": { - "markdownDescription": "Specifies the level of monitoring for the MSK cluster. 
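
The reworded `Principal` description above keeps the advice to scope service principals with `SourceArn` or `SourceAccount`. A sketch building on the hypothetical function from the previous example, with a hypothetical bucket:

```yaml
S3InvokePermission:
  Type: AWS::Lambda::Permission
  Properties:
    FunctionName: !Ref InlineFunction
    Action: lambda:InvokeFunction
    Principal: s3.amazonaws.com         # service principal
    SourceAccount: !Ref AWS::AccountId  # limits which account's bucket may invoke
    SourceArn: arn:aws:s3:::amzn-s3-demo-bucket  # hypothetical bucket
```
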
The possible values are `DEFAULT` , `PER_BROKER` , and `PER_TOPIC_PER_BROKER` .", + "markdownDescription": "", "title": "EnhancedMonitoring", "type": "string" }, "KafkaVersion": { - "markdownDescription": "The version of Apache Kafka. You can use Amazon MSK to create clusters that use Apache Kafka versions 1.1.1 and 2.2.1.", + "markdownDescription": "", "title": "KafkaVersion", "type": "string" }, "LoggingInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.LoggingInfo", - "markdownDescription": "Logging Info details.", + "markdownDescription": "", "title": "LoggingInfo" }, "NumberOfBrokerNodes": { - "markdownDescription": "The number of broker nodes in the cluster.", + "markdownDescription": "", "title": "NumberOfBrokerNodes", "type": "number" }, "OpenMonitoring": { "$ref": "#/definitions/AWS::MSK::Cluster.OpenMonitoring", - "markdownDescription": "The settings for open monitoring.", + "markdownDescription": "", "title": "OpenMonitoring" }, "StorageMode": { - "markdownDescription": "This controls storage mode for supported storage tiers.", + "markdownDescription": "", "title": "StorageMode", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Create tags when creating the cluster.", + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -152150,17 +152142,17 @@ "properties": { "CloudWatchLogs": { "$ref": "#/definitions/AWS::MSK::Cluster.CloudWatchLogs", - "markdownDescription": "Details of the CloudWatch Logs destination for broker logs.", + "markdownDescription": "", "title": "CloudWatchLogs" }, "Firehose": { "$ref": "#/definitions/AWS::MSK::Cluster.Firehose", - "markdownDescription": "Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs.", + "markdownDescription": "", "title": "Firehose" }, "S3": { "$ref": "#/definitions/AWS::MSK::Cluster.S3", - "markdownDescription": "Details of the Amazon S3 destination for broker logs.", + "markdownDescription": "", "title": "S3" } }, @@ -152170,7 +152162,7 @@ "additionalProperties": false, "properties": { "BrokerAZDistribution": { - "markdownDescription": "This parameter is currently not in use.", + "markdownDescription": "", "title": "BrokerAZDistribution", "type": "string" }, @@ -152178,13 +152170,13 @@ "items": { "type": "string" }, - "markdownDescription": "The list of subnets to connect to in the client virtual private cloud (VPC). Amazon creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data.\n\nIf you use the US West (N. California) Region, specify exactly two subnets. For other Regions where Amazon MSK is available, you can specify either two or three subnets. The subnets that you specify must be in distinct Availability Zones. When you create a cluster, Amazon MSK distributes the broker nodes evenly across the subnets that you specify.\n\nClient subnets can't occupy the Availability Zone with ID `use1-az3` .", + "markdownDescription": "", "title": "ClientSubnets", "type": "array" }, "ConnectivityInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.ConnectivityInfo", - "markdownDescription": "Information about the cluster's connectivity setting.", + "markdownDescription": "", "title": "ConnectivityInfo" }, "InstanceType": { @@ -152196,13 +152188,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster. 
If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC. If you specify security groups that were shared with you, you must ensure that you have permissions to them. Specifically, you need the `ec2:DescribeSecurityGroups` permission.", + "markdownDescription": "", "title": "SecurityGroups", "type": "array" }, "StorageInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.StorageInfo", - "markdownDescription": "Contains information about storage volumes attached to Amazon MSK broker nodes.", + "markdownDescription": "", "title": "StorageInfo" } }, @@ -152217,17 +152209,17 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::Cluster.Sasl", - "markdownDescription": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true.", + "markdownDescription": "", "title": "Sasl" }, "Tls": { "$ref": "#/definitions/AWS::MSK::Cluster.Tls", - "markdownDescription": "Details for ClientAuthentication using TLS. To turn on TLS access control, you must also turn on `EncryptionInTransit` by setting `inCluster` to true and `clientBroker` to `TLS` .", + "markdownDescription": "", "title": "Tls" }, "Unauthenticated": { "$ref": "#/definitions/AWS::MSK::Cluster.Unauthenticated", - "markdownDescription": "Details for ClientAuthentication using no authentication.", + "markdownDescription": "", "title": "Unauthenticated" } }, @@ -152237,12 +152229,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified CloudWatch Logs destination.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "LogGroup": { - "markdownDescription": "The CloudWatch log group that is the destination for broker logs.", + "markdownDescription": "", "title": "LogGroup", "type": "string" } @@ -152256,12 +152248,12 @@ "additionalProperties": false, "properties": { "Arn": { - "markdownDescription": "ARN of the configuration to use.", + "markdownDescription": "", "title": "Arn", "type": "string" }, "Revision": { - "markdownDescription": "The revision of the configuration to use.", + "markdownDescription": "", "title": "Revision", "type": "number" } @@ -152277,12 +152269,12 @@ "properties": { "PublicAccess": { "$ref": "#/definitions/AWS::MSK::Cluster.PublicAccess", - "markdownDescription": "Access control settings for the cluster's brokers.", + "markdownDescription": "", "title": "PublicAccess" }, "VpcConnectivity": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivity", - "markdownDescription": "VPC connection control settings for brokers", + "markdownDescription": "", "title": "VpcConnectivity" } }, @@ -152293,11 +152285,11 @@ "properties": { "ProvisionedThroughput": { "$ref": "#/definitions/AWS::MSK::Cluster.ProvisionedThroughput", - "markdownDescription": "EBS volume provisioned throughput information.", + "markdownDescription": "", "title": "ProvisionedThroughput" }, "VolumeSize": { - "markdownDescription": "The size in GiB of the EBS volume for the data drive on each broker node.", + "markdownDescription": "", "title": "VolumeSize", "type": "number" } @@ -152308,7 +152300,7 @@ "additionalProperties": false, "properties": { "DataVolumeKMSKeyId": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon KMS key for encrypting data at rest. 
If you don't specify a KMS key, MSK creates one for you and uses it.", + "markdownDescription": "", "title": "DataVolumeKMSKeyId", "type": "string" } @@ -152322,12 +152314,12 @@ "additionalProperties": false, "properties": { "ClientBroker": { - "markdownDescription": "Indicates the encryption setting for data in transit between clients and brokers. You must set it to one of the following values.\n\n`TLS` means that client-broker communication is enabled with TLS only.\n\n`TLS_PLAINTEXT` means that client-broker communication is enabled for both TLS-encrypted, as well as plaintext data.\n\n`PLAINTEXT` means that client-broker communication is enabled in plaintext only.\n\nThe default value is `TLS` .", + "markdownDescription": "", "title": "ClientBroker", "type": "string" }, "InCluster": { - "markdownDescription": "When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted. When set to false, the communication happens in plaintext.\n\nThe default value is true.", + "markdownDescription": "", "title": "InCluster", "type": "boolean" } @@ -152339,12 +152331,12 @@ "properties": { "EncryptionAtRest": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionAtRest", - "markdownDescription": "The data-volume encryption details.", + "markdownDescription": "", "title": "EncryptionAtRest" }, "EncryptionInTransit": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionInTransit", - "markdownDescription": "The details for encryption in transit.", + "markdownDescription": "", "title": "EncryptionInTransit" } }, @@ -152354,12 +152346,12 @@ "additionalProperties": false, "properties": { "DeliveryStream": { - "markdownDescription": "The Kinesis Data Firehose delivery stream that is the destination for broker logs.", + "markdownDescription": "", "title": "DeliveryStream", "type": "string" }, "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified Kinesis Data Firehose delivery stream.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152373,7 +152365,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152387,7 +152379,7 @@ "additionalProperties": false, "properties": { "EnabledInBroker": { - "markdownDescription": "Indicates whether you want to enable or disable the JMX Exporter.", + "markdownDescription": "", "title": "EnabledInBroker", "type": "boolean" } @@ -152402,7 +152394,7 @@ "properties": { "BrokerLogs": { "$ref": "#/definitions/AWS::MSK::Cluster.BrokerLogs", - "markdownDescription": "You can configure your MSK cluster to send broker logs to different destination types. 
This configuration specifies the details of these destinations.", + "markdownDescription": "", "title": "BrokerLogs" } }, @@ -152415,7 +152407,7 @@ "additionalProperties": false, "properties": { "EnabledInBroker": { - "markdownDescription": "Indicates whether you want to enable or disable the Node Exporter.", + "markdownDescription": "", "title": "EnabledInBroker", "type": "boolean" } @@ -152430,7 +152422,7 @@ "properties": { "Prometheus": { "$ref": "#/definitions/AWS::MSK::Cluster.Prometheus", - "markdownDescription": "Prometheus exporter settings.", + "markdownDescription": "", "title": "Prometheus" } }, @@ -152444,12 +152436,12 @@ "properties": { "JmxExporter": { "$ref": "#/definitions/AWS::MSK::Cluster.JmxExporter", - "markdownDescription": "Indicates whether you want to enable or disable the JMX Exporter.", + "markdownDescription": "", "title": "JmxExporter" }, "NodeExporter": { "$ref": "#/definitions/AWS::MSK::Cluster.NodeExporter", - "markdownDescription": "Indicates whether you want to enable or disable the Node Exporter.", + "markdownDescription": "", "title": "NodeExporter" } }, @@ -152459,12 +152451,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Provisioned throughput is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "VolumeThroughput": { - "markdownDescription": "Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second.", + "markdownDescription": "", "title": "VolumeThroughput", "type": "number" } @@ -152475,7 +152467,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "DISABLED means that public access is turned off. SERVICE_PROVIDED_EIPS means that public access is turned on.", + "markdownDescription": "", "title": "Type", "type": "string" } @@ -152486,17 +152478,17 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The name of the S3 bucket that is the destination for broker logs.", + "markdownDescription": "", "title": "Bucket", "type": "string" }, "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified Amazon S3 destination.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "Prefix": { - "markdownDescription": "The S3 prefix that is the destination for broker logs.", + "markdownDescription": "", "title": "Prefix", "type": "string" } @@ -152511,12 +152503,12 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::Cluster.Iam", - "markdownDescription": "Details for ClientAuthentication using IAM.", + "markdownDescription": "", "title": "Iam" }, "Scram": { "$ref": "#/definitions/AWS::MSK::Cluster.Scram", - "markdownDescription": "Details for SASL/SCRAM client authentication.", + "markdownDescription": "", "title": "Scram" } }, @@ -152526,7 +152518,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/SCRAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152541,7 +152533,7 @@ "properties": { "EBSStorageInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.EBSStorageInfo", - "markdownDescription": "EBS volume information.", + "markdownDescription": "", "title": "EBSStorageInfo" } }, @@ -152554,12 +152546,12 @@ "items": { "type": "string" }, - "markdownDescription": "List of AWS Private CA Amazon Resource Name (ARN)s.", + "markdownDescription": "", "title": "CertificateAuthorityArnList", "type": "array" }, "Enabled": { - 
"markdownDescription": "TLS authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152570,7 +152562,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Unauthenticated is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152585,7 +152577,7 @@ "properties": { "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityClientAuthentication", - "markdownDescription": "VPC connection control settings for brokers.", + "markdownDescription": "", "title": "ClientAuthentication" } }, @@ -152596,12 +152588,12 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivitySasl", - "markdownDescription": "Details for VpcConnectivity ClientAuthentication using SASL.", + "markdownDescription": "", "title": "Sasl" }, "Tls": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityTls", - "markdownDescription": "Details for VpcConnectivity ClientAuthentication using TLS.", + "markdownDescription": "", "title": "Tls" } }, @@ -152611,7 +152603,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152626,12 +152618,12 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityIam", - "markdownDescription": "Details for ClientAuthentication using IAM for VpcConnectivity.", + "markdownDescription": "", "title": "Iam" }, "Scram": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityScram", - "markdownDescription": "Details for SASL/SCRAM client authentication for VpcConnectivity.", + "markdownDescription": "", "title": "Scram" } }, @@ -152641,7 +152633,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/SCRAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152655,7 +152647,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "TLS authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152774,7 +152766,7 @@ "additionalProperties": false, "properties": { "Description": { - "markdownDescription": "The description of the configuration.", + "markdownDescription": "", "title": "Description", "type": "string" }, @@ -152788,16 +152780,16 @@ }, "LatestRevision": { "$ref": "#/definitions/AWS::MSK::Configuration.LatestRevision", - "markdownDescription": "Latest revision of the configuration.", + "markdownDescription": "", "title": "LatestRevision" }, "Name": { - "markdownDescription": "The name of the configuration. Configuration names are strings that match the regex \"^[0-9A-Za-z][0-9A-Za-z-]{0,}$\".", + "markdownDescription": "", "title": "Name", "type": "string" }, "ServerProperties": { - "markdownDescription": "Contents of the server.properties file. When using the API, you must ensure that the contents of the file are base64 encoded. 
When using the console, the SDK, or the CLI, the contents of server.properties can be in plaintext.", + "markdownDescription": "", "title": "ServerProperties", "type": "string" } @@ -152886,12 +152878,12 @@ "additionalProperties": false, "properties": { "CurrentVersion": { - "markdownDescription": "", + "markdownDescription": "The current version number of the replicator.", "title": "CurrentVersion", "type": "string" }, "Description": { - "markdownDescription": "", + "markdownDescription": "A summary description of the replicator.", "title": "Description", "type": "string" }, @@ -152899,7 +152891,7 @@ "items": { "$ref": "#/definitions/AWS::MSK::Replicator.KafkaCluster" }, - "markdownDescription": "", + "markdownDescription": "Kafka Clusters to use in setting up sources / targets for replication.", "title": "KafkaClusters", "type": "array" }, @@ -152907,17 +152899,17 @@ "items": { "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationInfo" }, - "markdownDescription": "", + "markdownDescription": "A list of replication configurations, where each configuration targets a given source cluster to target cluster replication flow.", "title": "ReplicationInfoList", "type": "array" }, "ReplicatorName": { - "markdownDescription": "", + "markdownDescription": "The name of the replicator. Alpha-numeric characters with '-' are allowed.", "title": "ReplicatorName", "type": "string" }, "ServiceExecutionRoleArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the IAM role used by the replicator to access resources in the customer's account (e.g. source and target clusters)", "title": "ServiceExecutionRoleArn", "type": "string" }, @@ -152925,7 +152917,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "List of tags to attach to created Replicator.", "title": "Tags", "type": "array" } @@ -152963,7 +152955,7 @@ "additionalProperties": false, "properties": { "MskClusterArn": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon MSK cluster.", "title": "MskClusterArn", "type": "string" } @@ -152980,7 +152972,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the consumer groups that should not be replicated.", "title": "ConsumerGroupsToExclude", "type": "array" }, @@ -152988,17 +152980,17 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the consumer groups to copy.", "title": "ConsumerGroupsToReplicate", "type": "array" }, "DetectAndCopyNewConsumerGroups": { - "markdownDescription": "", + "markdownDescription": "Enables synchronization of consumer groups to target cluster.", "title": "DetectAndCopyNewConsumerGroups", "type": "boolean" }, "SynchroniseConsumerGroupOffsets": { - "markdownDescription": "", + "markdownDescription": "Enables synchronization of consumer group offsets to target cluster.
The translated offsets will be written to topic __consumer_offsets.", "title": "SynchroniseConsumerGroupOffsets", "type": "boolean" } @@ -153013,12 +153005,12 @@ "properties": { "AmazonMskCluster": { "$ref": "#/definitions/AWS::MSK::Replicator.AmazonMskCluster", - "markdownDescription": "", + "markdownDescription": "Details of an Amazon MSK Cluster.", "title": "AmazonMskCluster" }, "VpcConfig": { "$ref": "#/definitions/AWS::MSK::Replicator.KafkaClusterClientVpcConfig", - "markdownDescription": "", + "markdownDescription": "Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.", "title": "VpcConfig" } }, @@ -153035,7 +153027,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The security groups to attach to the ENIs for the broker nodes.", "title": "SecurityGroupIds", "type": "array" }, @@ -153043,7 +153035,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The list of subnets in the client VPC to connect to.", "title": "SubnetIds", "type": "array" } @@ -153058,27 +153050,27 @@ "properties": { "ConsumerGroupReplication": { "$ref": "#/definitions/AWS::MSK::Replicator.ConsumerGroupReplication", - "markdownDescription": "", + "markdownDescription": "Configuration relating to consumer group replication.", "title": "ConsumerGroupReplication" }, "SourceKafkaClusterArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the source Kafka cluster.", "title": "SourceKafkaClusterArn", "type": "string" }, "TargetCompressionType": { - "markdownDescription": "", + "markdownDescription": "The compression type to use when producing records to target cluster.", "title": "TargetCompressionType", "type": "string" }, "TargetKafkaClusterArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the target Kafka cluster.", "title": "TargetKafkaClusterArn", "type": "string" }, "TopicReplication": { "$ref": "#/definitions/AWS::MSK::Replicator.TopicReplication", - "markdownDescription": "", + "markdownDescription": "Configuration relating to topic replication.", "title": "TopicReplication" } }, @@ -153095,7 +153087,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "", + "markdownDescription": "The type of replication starting position.", "title": "Type", "type": "string" } @@ -153106,30 +153098,30 @@ "additionalProperties": false, "properties": { "CopyAccessControlListsForTopics": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.", "title": "CopyAccessControlListsForTopics", "type": "boolean" }, "CopyTopicConfigurations": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically configure remote topics to match their corresponding upstream topics.", "title": "CopyTopicConfigurations", "type": "boolean" }, "DetectAndCopyNewTopics": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically check for new topics and partitions.", "title": "DetectAndCopyNewTopics", "type": "boolean" }, "StartingPosition": { "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationStartingPosition", - "markdownDescription": "", + "markdownDescription": "Specifies the position in the topics to start replicating from.", "title": "StartingPosition" }, "TopicsToExclude": { "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the topics 
that should not be replicated.", "title": "TopicsToExclude", "type": "array" }, @@ -153137,7 +153129,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the topics to copy.", "title": "TopicsToReplicate", "type": "array" } @@ -153184,7 +153176,7 @@ "properties": { "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.ClientAuthentication", - "markdownDescription": "Includes all client authentication information.", + "markdownDescription": "", "title": "ClientAuthentication" }, "ClusterName": { @@ -153245,7 +153237,7 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.Sasl", - "markdownDescription": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true.", + "markdownDescription": "", "title": "Sasl" } }, @@ -153258,7 +153250,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -153273,7 +153265,7 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.Iam", - "markdownDescription": "Details for ClientAuthentication using IAM.", + "markdownDescription": "", "title": "Iam" } }, @@ -153351,7 +153343,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of subnets in the client VPC to connect to.", + "markdownDescription": "", "title": "ClientSubnets", "type": "array" }, @@ -153359,13 +153351,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups to attach to the ENIs for the broker nodes.", + "markdownDescription": "", "title": "SecurityGroups", "type": "array" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Create tags when creating the VPC connection.", + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -153375,12 +153367,12 @@ "type": "object" }, "TargetClusterArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the cluster.", + "markdownDescription": "", "title": "TargetClusterArn", "type": "string" }, "VpcId": { - "markdownDescription": "The VPC id of the remote client.", + "markdownDescription": "", "title": "VpcId", "type": "string" } @@ -153456,7 +153448,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. 
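
The `AWS::MSK::Replicator` descriptions filled in above correspond to a template shaped roughly like this sketch; every ARN, parameter, and network ID here is a hypothetical placeholder:

```yaml
KafkaReplicator:
  Type: AWS::MSK::Replicator
  Properties:
    ReplicatorName: example-replicator              # hypothetical
    ServiceExecutionRoleArn: !GetAtt ReplicatorRole.Arn  # hypothetical role
    KafkaClusters:
      - AmazonMskCluster:
          MskClusterArn: !Ref SourceClusterArn      # hypothetical parameter
        VpcConfig:
          SecurityGroupIds: [sg-0123456789abcdef0]
          SubnetIds: [subnet-0aaa1111, subnet-0bbb2222]
      - AmazonMskCluster:
          MskClusterArn: !Ref TargetClusterArn      # hypothetical parameter
        VpcConfig:
          SecurityGroupIds: [sg-0123456789abcdef1]
          SubnetIds: [subnet-0ccc3333, subnet-0ddd4444]
    ReplicationInfoList:
      - SourceKafkaClusterArn: !Ref SourceClusterArn
        TargetKafkaClusterArn: !Ref TargetClusterArn
        TargetCompressionType: NONE
        TopicReplication:
          TopicsToReplicate: [".*"]                 # regex patterns, per the descriptions above
        ConsumerGroupReplication:
          ConsumerGroupsToReplicate: [".*"]
```
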
If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -161769,8 +161761,6 @@ "additionalProperties": false, "properties": { "ChannelId": { - "markdownDescription": "The unique ID of the channel.", - "title": "ChannelId", "type": "string" }, "MultiplexId": { @@ -166113,7 +166103,7 @@ "type": "boolean" }, "KmsKeyId": { - "markdownDescription": "If `StorageEncrypted` is true, the Amazon KMS key identifier for the encrypted DB cluster.", + "markdownDescription": "The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the database instances in the DB cluster, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the `StorageEncrypted` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the `StorageEncrypted` property to `true` .", "title": "KmsKeyId", "type": "string" }, @@ -166153,7 +166143,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `DBClusterIdentifier` , `DBSnapshotIdentifier` , or `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the cluster, snapshot, or source DB instance. If you specify the `KmsKeyId` property, you must enable encryption.\n\nIf you specify the `KmsKeyId` , you must enable encryption by setting `StorageEncrypted` to true.", + "markdownDescription": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption and set this property to `true` .\n\nIf you enable the `StorageEncrypted` property but don't specify the `KmsKeyId` property, then the default KMS key is used. If you specify the `KmsKeyId` property, then that KMS key is used to encrypt the database instances in the DB cluster.\n\nIf you specify the `SourceDBClusterIdentifier` property, and don't specify this property or disable it, the value is inherited from the source DB cluster. If the source DB cluster is encrypted, the `KmsKeyId` property from the source cluster is used.\n\nIf you specify the `DBSnapshotIdentifier` and don't specify this property or disable it, the value is inherited from the snapshot and the specified `KmsKeyId` property from the snapshot is used.", "title": "StorageEncrypted", "type": "boolean" }, @@ -167377,7 +167367,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
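
The rewritten Neptune `KmsKeyId` and `StorageEncrypted` descriptions above couple the two properties: supplying a key requires enabling encryption. A minimal sketch with a hypothetical KMS key resource:

```yaml
NeptuneCluster:
  Type: AWS::Neptune::DBCluster
  Properties:
    StorageEncrypted: true             # must be true whenever KmsKeyId is set
    KmsKeyId: !GetAtt NeptuneKey.Arn   # hypothetical AWS::KMS::Key resource
```
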
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -167392,7 +167382,7 @@ "type": "string" }, "LogType": { - "markdownDescription": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.", + "markdownDescription": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* .", "title": "LogType", "type": "string" } @@ -174945,7 +174935,7 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
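
The `LogDestination` examples embedded in the description above map directly onto a logging configuration resource; the firewall reference is hypothetical:

```yaml
FirewallLogging:
  Type: AWS::NetworkFirewall::LoggingConfiguration
  Properties:
    FirewallArn: !Ref ExampleFirewall  # hypothetical AWS::NetworkFirewall::Firewall
    LoggingConfiguration:
      LogDestinationConfigs:
        - LogType: ALERT
          LogDestinationType: S3
          LogDestination:
            bucketName: DOC-EXAMPLE-BUCKET
            prefix: alerts
        - LogType: FLOW
          LogDestinationType: CloudWatchLogs
          LogDestination:
            logGroup: alert-log-group
```
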
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- AI services opt-out policies: 2,500 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "title": "Content", "type": "object" }, @@ -182008,22 +181998,22 @@ "type": "number" }, "MaximumRecordAgeInSeconds": { - "markdownDescription": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", + "markdownDescription": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", "title": "MaximumRecordAgeInSeconds", "type": "number" }, "MaximumRetryAttempts": { - "markdownDescription": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "markdownDescription": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", "title": "MaximumRetryAttempts", "type": "number" }, "OnPartialBatchItemFailure": { - "markdownDescription": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", + "markdownDescription": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", "title": "OnPartialBatchItemFailure", "type": "string" }, "ParallelizationFactor": { - "markdownDescription": "(Streams only) The number of batches to process concurrently from each shard.
The default value is 1.", "title": "ParallelizationFactor", "type": "number" }, @@ -182057,27 +182047,27 @@ "type": "number" }, "MaximumRecordAgeInSeconds": { - "markdownDescription": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", + "markdownDescription": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", "title": "MaximumRecordAgeInSeconds", "type": "number" }, "MaximumRetryAttempts": { - "markdownDescription": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "markdownDescription": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", "title": "MaximumRetryAttempts", "type": "number" }, "OnPartialBatchItemFailure": { - "markdownDescription": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", + "markdownDescription": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", "title": "OnPartialBatchItemFailure", "type": "string" }, "ParallelizationFactor": { - "markdownDescription": "(Streams only) The number of batches to process concurrently from each shard.
The default value is 1.", "title": "ParallelizationFactor", "type": "number" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182116,7 +182106,7 @@ "type": "number" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182249,7 +182239,7 @@ "type": "string" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182742,7 +182732,7 @@ "type": "string" }, "OutputFormat": { - "markdownDescription": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "markdownDescription": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "title": "OutputFormat", "type": "string" }, @@ -182807,7 +182797,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224627,7 +224617,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224810,7 +224800,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
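
The `(Streams only)` qualifiers removed above all belong to EventBridge Pipes stream-source parameters. A sketch showing how they sit together for a DynamoDB stream source; the source, target, and role are hypothetical resources:

```yaml
StreamPipe:
  Type: AWS::Pipes::Pipe
  Properties:
    Name: example-pipe
    RoleArn: !GetAtt PipeRole.Arn          # hypothetical role
    Source: !GetAtt OrdersTable.StreamArn  # hypothetical DynamoDB stream
    SourceParameters:
      DynamoDBStreamParameters:
        StartingPosition: LATEST
        MaximumRecordAgeInSeconds: -1      # -1 = infinite, per the description above
        MaximumRetryAttempts: 3
        OnPartialBatchItemFailure: AUTOMATIC_BISECT
        ParallelizationFactor: 1
    Target: !GetAtt OrdersQueue.Arn        # hypothetical SQS queue
```
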
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224868,7 +224858,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224952,7 +224942,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225058,17 +225048,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225081,7 +225071,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225178,7 +225168,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225223,7 +225213,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225261,12 +225251,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an unencrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. 
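As a sketch of the restore-from-snapshot behavior described above, a hypothetical template restores an instance and deliberately omits the properties the description says not to specify (`MasterUsername`, `MasterUserPassword`, `KmsKeyId`, and so on). The snapshot identifier is a placeholder:

```yaml
Resources:
  RestoredInstance:
    Type: AWS::RDS::DBInstance
    Properties:
      # Keep the same DBSnapshotIdentifier on future updates, per the description above.
      DBSnapshotIdentifier: my-db-snapshot   # placeholder snapshot name
      DBInstanceClass: db.t3.micro
```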
If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225281,7 +225271,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225392,7 +225382,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225402,7 +225392,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. 
You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225432,7 +225422,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225470,7 +225460,7 @@ "type": "string" }, "RestoreTime": { - "markdownDescription": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", + "markdownDescription": "The date and time to restore from. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", "title": "RestoreTime", "type": "string" }, @@ -225518,7 +225508,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225533,7 +225523,7 @@ "type": "boolean" }, "UseLatestRestorableTime": { - "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", + "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", "title": "UseLatestRestorableTime", "type": "boolean" }, @@ -225634,7 +225624,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.",
This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225703,12 +225693,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. 
You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225716,7 +225706,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225802,7 +225792,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "title": "EngineFamily", "type": "string" }, @@ -225880,7 +225870,7 @@ "additionalProperties": false, "properties": { "AuthScheme": { - "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "title": "AuthScheme", "type": "string" }, @@ -225895,7 +225885,7 @@ "type": "string" }, "IAMAuth": { - "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "title": "IAMAuth", "type": "string" }, @@ -225911,12 +225901,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. 
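A minimal sketch of the `EngineFamily` and `Auth` properties described in the surrounding hunks, with hypothetical logical IDs (`ProxyRole`, `DBSecret`) and placeholder subnet IDs standing in for resources a real template would define:

```yaml
Resources:
  MyProxy:
    Type: AWS::RDS::DBProxy
    Properties:
      DBProxyName: my-proxy            # hypothetical name
      EngineFamily: MYSQL              # MYSQL | POSTGRESQL | SQLSERVER, per the description above
      RoleArn: !GetAtt ProxyRole.Arn   # IAM role assumed to be defined elsewhere
      VpcSubnetIds:
        - subnet-0123456789abcdef0     # placeholder subnet IDs
        - subnet-0fedcba9876543210
      Auth:
        - AuthScheme: SECRETS
          SecretArn: !Ref DBSecret     # Secrets Manager secret assumed to be defined elsewhere
          IAMAuth: DISABLED
```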
The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -225977,7 +225967,7 @@ "type": "array" }, "TargetRole": { - "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`", + "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.", "title": "TargetRole", "type": "string" }, @@ -226030,12 +226020,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` .
The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -226079,7 +226069,7 @@ "properties": { "ConnectionPoolConfigurationInfo": { "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat", - "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "title": "ConnectionPoolConfigurationInfo" }, "DBClusterIdentifiers": { @@ -226140,7 +226130,7 @@ "additionalProperties": false, "properties": { "ConnectionBorrowTimeout": { - "markdownDescription": "The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions. For an unlimited wait time, specify `0` .\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.", + "markdownDescription": "The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.", "title": "ConnectionBorrowTimeout", "type": "number" }, @@ -226214,7 +226204,7 @@ "type": "array" }, "EC2VpcId": { - "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC). This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226227,7 +226217,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226413,7 +226403,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group.
This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226429,7 +226419,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226518,12 +226508,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . 
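The `db-proxy` source type described here pairs with the `SourceIds` constraint above (an RDS Proxy source requires a `DBProxyName` value). A hypothetical sketch, assuming an SNS topic `AlertTopic` defined elsewhere in the template:

```yaml
Resources:
  ProxyEventSubscription:
    Type: AWS::RDS::EventSubscription
    Properties:
      SnsTopicArn: !Ref AlertTopic   # Ref on an AWS::SNS::Topic returns its ARN
      SourceType: db-proxy
      SourceIds:
        - my-proxy                   # DBProxyName value; placeholder
```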
If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226730,7 +226720,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226816,7 +226806,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226834,7 +226824,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226874,7 +226864,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226905,7 +226895,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -235020,7 +235010,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocols for the Resolver endpoints. DoH-FIPS is applicable for inbound endpoints only.\n\nFor an inbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 and DoH-FIPS in combination.\n- Do53 alone.\n- DoH alone.\n- DoH-FIPS alone.\n- None, which is treated as Do53.\n\nFor an outbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 alone.\n- DoH alone.\n- None, which is treated as Do53.", + "markdownDescription": "The protocols for the target address. The protocol you choose needs to be supported by the outbound endpoint of the Resolver rule.", "title": "Protocol", "type": "string" } @@ -235666,7 +235656,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. 
We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "title": "VersioningConfiguration" }, "WebsiteConfiguration": { @@ -236113,7 +236103,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.Rule" }, - "markdownDescription": "A lifecycle rule for individual objects in an Amazon S3 bucket.", + "markdownDescription": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", "title": "Rules", "type": "array" } @@ -236814,12 +236804,12 @@ "additionalProperties": false, "properties": { "KMSMasterKeyID": { - "markdownDescription": "AWS Key Management Service (KMS) customer AWS KMS key ID to use for the default encryption. This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n\nIf you are using encryption with cross-account or AWS service operations you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> Amazon S3 only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", + "markdownDescription": "AWS Key Management Service (KMS) customer managed key ID to use for the default encryption.\n\n> - *General purpose buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n> - *Directory buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` . \n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you are using encryption with cross-account or AWS service operations, you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> - *General purpose buckets* - If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then AWS KMS resolves the key within the requester\u2019s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n> - *Directory buckets* - When you specify an [AWS KMS customer managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. > Amazon S3 only supports symmetric encryption KMS keys. 
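A combined sketch of the versioning and default-encryption properties covered in these hunks; the bucket logical ID and the KMS key ARN are placeholders:

```yaml
Resources:
  MyBucket:
    Type: AWS::S3::Bucket
    Properties:
      VersioningConfiguration:
        Status: Enabled
      BucketEncryption:
        ServerSideEncryptionConfiguration:
          - ServerSideEncryptionByDefault:
              SSEAlgorithm: aws:kms
              # Fully qualified key ARN, as the description recommends for
              # cross-account use; this ARN is a placeholder.
              KMSMasterKeyID: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
```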
For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", "title": "KMSMasterKeyID", "type": "string" }, "SSEAlgorithm": { - "markdownDescription": "Server-side encryption algorithm to use for the default encryption.", + "markdownDescription": "Server-side encryption algorithm to use for the default encryption.\n\n> For directory buckets, there are only two supported values for server-side encryption: `AES256` and `aws:kms` .", "title": "SSEAlgorithm", "type": "string" } @@ -238133,7 +238123,7 @@ "additionalProperties": false, "properties": { "BucketName": { - "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*DOC-EXAMPLE-BUCKET* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "title": "BucketName", "type": "string" }, @@ -240121,7 +240111,7 @@ "type": "string" }, "KmsKeyArn": { - "markdownDescription": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. 
For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "markdownDescription": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "title": "KmsKeyArn", "type": "string" }, @@ -240601,12 +240591,12 @@ "additionalProperties": false, "properties": { "ArchivePolicy": { - "markdownDescription": "The archive policy determines the number of days Amazon SNS retains messages. 
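A sketch of the FIFO-only topic properties described in the next hunk, with a hypothetical topic name; `ArchivePolicy` is a free-form object, and the `MessageRetentionPeriod` key follows the SNS message-archiving documentation:

```yaml
Resources:
  OrdersTopic:
    Type: AWS::SNS::Topic
    Properties:
      TopicName: orders.fifo           # FIFO topic names must end in .fifo
      FifoTopic: true                  # ArchivePolicy fails on standard topics
      ContentBasedDeduplication: true  # dedupe by SHA-256 hash of the message body
      ArchivePolicy:
        MessageRetentionPeriod: "30"   # days, within the 1-365 range described above
```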
You can set a retention period from 1 to 365 days.", + "markdownDescription": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days. This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", "title": "ArchivePolicy", "type": "object" }, "ContentBasedDeduplication": { - "markdownDescription": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "markdownDescription": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "title": "ContentBasedDeduplication", "type": "boolean" }, @@ -240948,7 +240938,7 @@ "type": "number" }, "FifoQueue": { - "markdownDescription": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .", + "markdownDescription": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Amazon SQS Developer Guide* .", "title": "FifoQueue", "type": "boolean" }, @@ -240963,7 +240953,7 @@ "type": "number" }, "KmsMasterKeyId": { - "markdownDescription": "The ID of an AWS Key Management Service (KMS) for Amazon SQS , or a custom KMS. To use the AWS managed KMS for Amazon SQS , specify a (default) alias ARN, alias name (e.g. `alias/aws/sqs` ), key ARN, or key ID. 
For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [AWS Key Management Service Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper", + "markdownDescription": "The ID of an AWS Key Management Service (KMS) key for Amazon SQS , or a custom KMS key. To use the AWS managed KMS key for Amazon SQS , specify a (default) alias ARN, alias name (for example `alias/aws/sqs` ), key ARN, or key ID. For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [Security best practices for AWS Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *AWS Key Management Service Developer Guide*", "title": "KmsMasterKeyId", "type": "string" }, @@ -240978,7 +240968,7 @@ "type": "number" }, "QueueName": { - "markdownDescription": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the `.fifo` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *AWS CloudFormation User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption.
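A sketch combining the queue properties from these hunks; the queue name is hypothetical, and the alias form of `KmsMasterKeyId` is the AWS managed key described above:

```yaml
Resources:
  JobsQueue:
    Type: AWS::SQS::Queue
    Properties:
      QueueName: jobs.fifo            # must end in .fifo for a FIFO queue
      FifoQueue: true
      KmsMasterKeyId: alias/aws/sqs   # AWS managed key, alias-name form
```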
If you must replace the resource, specify a new name.", "title": "QueueName", "type": "string" }, @@ -241533,7 +241523,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", + "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", "title": "Values", "type": "array" } @@ -241871,7 +241861,7 @@ "type": "number" }, "ServiceRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. 
If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "title": "ServiceRoleArn", "type": "string" }, @@ -242065,7 +242055,7 @@ "type": "object" }, "ServiceRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks.
For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "title": "ServiceRoleArn", "type": "string" }, @@ -242327,7 +242317,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "markdownDescription": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", "title": "ApprovedPatches", "type": "array" }, @@ -242351,7 +242341,7 @@ }, "GlobalFilters": { "$ref": "#/definitions/AWS::SSM::PatchBaseline.PatchFilterGroup", - "markdownDescription": "A set of global filters used to include patches in the baseline.", + "markdownDescription": "A set of global filters used to include patches in the baseline.\n\n> The `GlobalFilters` parameter can be configured only by using the AWS CLI or an AWS SDK. It can't be configured from the Patch Manager console, and its value isn't displayed in the console.", "title": "GlobalFilters" }, "Name": { @@ -242376,12 +242366,12 @@ "items": { "type": "string" }, - "markdownDescription": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "markdownDescription": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", "title": "RejectedPatches", "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances.
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "title": "RejectedPatchesAction", "type": "string" }, @@ -242489,12 +242479,12 @@ "additionalProperties": false, "properties": { "ApproveAfterDays": { - "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", + "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", "title": "ApproveAfterDays", "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .", + "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` .
For example, `2024-12-31` .\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", "title": "ApproveUntilDate", "type": "string" }, @@ -248158,7 +248148,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "markdownDescription": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -254606,7 +254596,7 @@ "properties": { "HostedRotationLambda": { "$ref": "#/definitions/AWS::SecretsManager::RotationSchedule.HostedRotationLambda", - "markdownDescription": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .", + "markdownDescription": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .", "title": "HostedRotationLambda" }, "RotateImmediatelyOnUpdate": { @@ -254615,7 +254605,7 @@ "type": "boolean" }, "RotationLambdaARN": { - "markdownDescription": "The ARN of an existing Lambda rotation function. 
To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", + "markdownDescription": "The ARN of an existing Lambda rotation function. To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", "title": "RotationLambdaARN", "type": "string" }, @@ -254625,7 +254615,7 @@ "title": "RotationRules" }, "SecretId": { - "markdownDescription": "The ARN or name of the secret to rotate.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", + "markdownDescription": "The ARN or name of the secret to rotate. This is unique for each rotation schedule definition.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", "title": "SecretId", "type": "string" } @@ -254951,7 +254941,7 @@ "additionalProperties": false, "properties": { "SecretId": { - "markdownDescription": "The ARN or name of the secret. To reference a secret also created in this template, use the see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", + "markdownDescription": "The ARN or name of the secret. To reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID. This field is unique for each target attachment definition.", "title": "SecretId", "type": "string" }, @@ -254961,7 +254951,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database.
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster", + "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::RedshiftServerless::Namespace\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster", "title": "TargetType", "type": "string" } @@ -255110,7 +255100,7 @@ "title": "FindingFieldsUpdate" }, "Type": { - "markdownDescription": "Specifies that the rule action should update the `Types` finding field. The `Types` finding field classifies findings in the format of namespace/category/classifier. For more information, see [Types taxonomy for ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-type-taxonomy.html) in the *AWS Security Hub User Guide* .", + "markdownDescription": "Specifies the type of action that Security Hub takes when a finding matches the defined criteria of a rule.", "title": "Type", "type": "string" } @@ -255607,7 +255597,7 @@ "type": "string" }, "Normalized": { - "markdownDescription": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and do not provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", + "markdownDescription": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and don't provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", "title": "Normalized", "type": "number" }, @@ -255643,7 +255633,7 @@ "additionalProperties": false, "properties": { "Status": { - "markdownDescription": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated.", + "markdownDescription": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. 
For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated.", "title": "Status", "type": "string" } @@ -256682,7 +256672,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" }, - "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", + "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. 
Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", "title": "WorkflowStatus", "type": "array" } @@ -259908,7 +259898,7 @@ "items": { "$ref": "#/definitions/AWS::ServiceDiscovery::Service.DnsRecord" }, - "markdownDescription": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.", + "markdownDescription": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.\n\n> The record type of a service can't be updated directly and can only be changed by deleting the service and recreating it with a new `DnsConfig` .", "title": "DnsRecords", "type": "array" }, @@ -261095,7 +261085,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" }, @@ -278802,6 +278792,9 @@ "markdownDescription": "Create a snapshot of any new Lambda function version\\. A snapshot is a cached state of your initialized function, including all of its dependencies\\. The function is initialized just once and the cached state is reused for all future invocations, improving application performance by reducing the number of times your function must be initialized\\. To learn more, see [Improving startup performance with Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) in the *AWS Lambda Developer Guide*\\. 
\n*Type*: [SnapStart](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) \n*Required*: No \n*AWS CloudFormation compatibility*: This property is passed directly to the [`SnapStart`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) property of an `AWS::Lambda::Function` resource\\.", "title": "SnapStart" }, + "SourceKMSKeyArn": { + "$ref": "#/definitions/PassThroughProp" + }, "Tags": { "markdownDescription": "A map \\(string to string\\) that specifies the tags added to this function\\. For details about valid keys and values for tags, see [Tag Key and Value Requirements](https://docs.aws.amazon.com/lambda/latest/dg/configuration-tags.html#configuration-tags-restrictions) in the *AWS Lambda Developer Guide*\\. \nWhen the stack is created, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\. \n*Type*: Map \n*Required*: No \n*AWS CloudFormation compatibility*: This property is similar to the [`Tags`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-tags) property of an `AWS::Lambda::Function` resource\\. The `Tags` property in AWS SAM consists of key\\-value pairs \\(whereas in AWS CloudFormation this property consists of a list of `Tag` objects\\)\\. Also, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\.", "title": "Tags", @@ -279196,6 +279189,9 @@ "markdownDescription": "Create a snapshot of any new Lambda function version\\. A snapshot is a cached state of your initialized function, including all of its dependencies\\. The function is initialized just once and the cached state is reused for all future invocations, improving application performance by reducing the number of times your function must be initialized\\. To learn more, see [Improving startup performance with Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) in the *AWS Lambda Developer Guide*\\. \n*Type*: [SnapStart](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) \n*Required*: No \n*AWS CloudFormation compatibility*: This property is passed directly to the [`SnapStart`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) property of an `AWS::Lambda::Function` resource\\.", "title": "SnapStart" }, + "SourceKMSKeyArn": { + "$ref": "#/definitions/PassThroughProp" + }, "Tags": { "markdownDescription": "A map \\(string to string\\) that specifies the tags added to this function\\. For details about valid keys and values for tags, see [Tag Key and Value Requirements](https://docs.aws.amazon.com/lambda/latest/dg/configuration-tags.html#configuration-tags-restrictions) in the *AWS Lambda Developer Guide*\\. \nWhen the stack is created, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\. \n*Type*: Map \n*Required*: No \n*AWS CloudFormation compatibility*: This property is similar to the [`Tags`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-tags) property of an `AWS::Lambda::Function` resource\\. 
The `Tags` property in AWS SAM consists of key\\-value pairs \\(whereas in AWS CloudFormation this property consists of a list of `Tag` objects\\)\\. Also, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\.", "title": "Tags", @@ -281024,6 +281020,10 @@ ], "markdownDescription": "The type of the state machine\\. \n*Valid values*: `STANDARD` or `EXPRESS` \n*Type*: String \n*Required*: No \n*Default*: `STANDARD` \n*AWS CloudFormation compatibility*: This property is passed directly to the [`StateMachineType`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinetype) property of an `AWS::StepFunctions::StateMachine` resource\\.", "title": "Type" + }, + "UseAliasAsEventTarget": { + "title": "Usealiasaseventtarget", + "type": "boolean" } }, "title": "Properties", diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index f49fa532d..772079fbb 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -98,7 +98,7 @@ "CsrExtensions": "Specifies information to be added to the extension section of the certificate signing request (CSR).", "KeyAlgorithm": "Type of the public key algorithm and size, in bits, of the key pair that your CA creates when it issues a certificate. When you create a subordinate CA, you must use a key algorithm supported by the parent CA.", "KeyStorageSecurityStandard": "Specifies a cryptographic key management compliance standard used for handling CA keys.\n\nDefault: FIPS_140_2_LEVEL_3_OR_HIGHER\n\n> Some AWS Regions do not support the default. When creating a CA in these Regions, you must provide `FIPS_140_2_LEVEL_2_OR_HIGHER` as the argument for `KeyStorageSecurityStandard` . Failure to do this results in an `InvalidArgsException` with the message, \"A certificate authority cannot be created in this region with the specified security standard.\"\n> \n> For information about security standard support in various Regions, see [Storage and security compliance of AWS Private CA private keys](https://docs.aws.amazon.com/privateca/latest/userguide/data-protection.html#private-keys) .", - "RevocationConfiguration": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. 
For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".", + "RevocationConfiguration": "Information about the Online Certificate Status Protocol (OCSP) configuration or certificate revocation list (CRL) created and maintained by your private CA.", "SigningAlgorithm": "Name of the algorithm your private CA uses to sign certificate requests.\n\nThis parameter should not be confused with the `SigningAlgorithm` parameter used to sign certificates when they are issued.", "Subject": "Structure that contains X.500 distinguished name information for your private CA.", "Tags": "Key-value pairs that will be attached to the new private CA. You can associate up to 50 tags with a private CA. For information using tags with IAM to manage permissions, see [Controlling Access Using IAM Tags](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html) .", @@ -254,6 +254,9 @@ "Key": "The key of the tag. Must not begin with `aws:` .", "Value": "The value of the tag." }, + "AWS::ARCZonalShift::AutoshiftObserverNotificationStatus": { + "Status": "" + }, "AWS::ARCZonalShift::ZonalAutoshiftConfiguration": { "PracticeRunConfiguration": "A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, Route 53 ARC shifts traffic for the resource weekly for practice runs.\n\nPractice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.\n\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.", "ResourceIdentifier": "The identifier for the resource that AWS shifts traffic for. The identifier is the Amazon Resource Name (ARN) for the resource.\n\nAt this time, supported resources are Network Load Balancers and Application Load Balancers with cross-zone load balancing turned off.", @@ -386,6 +389,7 @@ "AutoBranchCreationConfig": "Sets the configuration for your automatic branch creation.", "BasicAuthConfig": "The credentials for basic authorization for an Amplify app. 
You must base64-encode the authorization credentials and provide them in the format `user:password` .", "BuildSpec": "The build specification (build spec) for an Amplify app.", + "CacheConfig": "The cache configuration for the Amplify app. If you don't specify the cache configuration `type` , Amplify uses the default `AMPLIFY_MANAGED` setting.", "CustomHeaders": "The custom HTTP headers for an Amplify app.", "CustomRules": "The custom rewrite and redirect rules for an Amplify app.", "Description": "The description of the Amplify app.", @@ -394,7 +398,7 @@ "IAMServiceRole": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", "Name": "The name of the Amplify app.", "OauthToken": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", - "Platform": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .", + "Platform": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .\n\nIf you are deploying an SSG only app with Next.js version 14 or later, you must set the platform type to `WEB_COMPUTE` and set the artifacts `baseDirectory` to `.next` in the application's build settings. For an example of the build specification settings, see [Amplify build settings for a Next.js 14 SSG application](https://docs.aws.amazon.com/amplify/latest/userguide/deploy-nextjs-app.html#build-setting-detection-ssg-14) in the *Amplify Hosting User Guide* .", "Repository": "The Git repository for the Amplify app.", "Tags": "The tag for an Amplify app." }, @@ -416,6 +420,9 @@ "Password": "The password for basic authorization.", "Username": "The user name for basic authorization." }, + "AWS::Amplify::App CacheConfig": { + "Type": "The type of cache configuration to use for an Amplify app.\n\nThe `AMPLIFY_MANAGED` cache configuration automatically applies an optimized cache configuration for your app based on its platform, routing rules, and rewrite rules. This is the default setting.\n\nThe `AMPLIFY_MANAGED_NO_COOKIES` cache configuration type is the same as `AMPLIFY_MANAGED` , except that it excludes all cookies from the cache key." 
+ }, "AWS::Amplify::App CustomRule": { "Condition": "The condition for a URL rewrite or redirect rule, such as a country code.", "Source": "The source pattern for a URL rewrite or redirect rule.", @@ -1191,6 +1198,13 @@ "TimeoutInMillis": "Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs.", "TlsConfig": "The TLS configuration for a private integration. If you specify a TLS configuration, private integration traffic uses the HTTPS protocol. Supported only for HTTP APIs." }, + "AWS::ApiGatewayV2::Integration ResponseParameter": { + "Destination": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "Source": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) ." + }, + "AWS::ApiGatewayV2::Integration ResponseParameterMap": { + "ResponseParameters": "" + }, "AWS::ApiGatewayV2::Integration TlsConfig": { "ServerNameToVerify": "If you specify a server name, API Gateway uses it to verify the hostname on the integration's certificate. The server name is also included in the TLS handshake to support Server Name Indication (SNI) or virtual hosting." }, @@ -1279,7 +1293,7 @@ "ApplicationId": "The application ID.", "Description": "A description of the configuration profile.", "KmsKeyIdentifier": "The AWS Key Management Service key identifier (key ID, key alias, or key ARN) provided when the resource was created or updated.", - "LocationUri": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://my-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", + "LocationUri": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . 
Here is an example: `s3://amzn-s3-demo-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", "Name": "A name for the configuration profile.", "RetrievalRoleArn": "The ARN of an IAM role with permission to access the configuration at the specified `LocationUri` .\n\n> A retrieval role ARN is not required for configurations stored in the AWS AppConfig hosted configuration store. It is required for all other sources that store your configuration.", "Tags": "Metadata to assign to the configuration profile. Tags help organize and categorize your AWS AppConfig resources. Each tag consists of a key and an optional value, both of which you define.", @@ -3175,7 +3189,7 @@ "AWS::AppSync::GraphQLApi LogConfig": { "CloudWatchLogsRoleArn": "The service role that AWS AppSync will assume to publish to Amazon CloudWatch Logs in your account.", "ExcludeVerboseContent": "Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level.", - "FieldLogLevel": "The field logging level. Values can be NONE, ERROR, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information only for the fields that are in error:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that got resolved for each field." + "FieldLogLevel": "The field logging level. Values can be NONE, ERROR, INFO, DEBUG, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information *only* for the fields that are in the error category:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *INFO* : Logs the following information *only* for the fields that are in the info and error categories:\n\n- Info-level messages.\n- The user messages sent through `$util.log.info` and `console.log` .\n- Field-level tracing and mapping logs are not shown.\n- *DEBUG* : Logs the following information *only* for the fields that are in the debug, info, and error categories:\n\n- Debug-level messages.\n- The user messages sent through `$util.log.info` , `$util.log.debug` , `console.log` , and `console.debug` .\n- Field-level tracing and mapping logs are not shown.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that were resolved for each field." }, "AWS::AppSync::GraphQLApi OpenIDConnectConfig": { "AuthTTL": "The number of milliseconds that a token is valid after being authenticated.", @@ -3288,17 +3302,17 @@ "TargetLocation": "The target location of the input file." }, "AWS::AppTest::TestCase M2ManagedActionProperties": { - "ForceStop": "Force stops the AWS Mainframe Modernization managed action properties.", - "ImportDataSetLocation": "The import data set location of the AWS Mainframe Modernization managed action properties." + "ForceStop": "Force stops the Mainframe Modernization managed action properties.", + "ImportDataSetLocation": "The import data set location of the Mainframe Modernization managed action properties." 
}, "AWS::AppTest::TestCase M2ManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization managed application action.", - "Properties": "The properties of the AWS Mainframe Modernization managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization managed application action." + "ActionType": "The action type of the Mainframe Modernization managed application action.", + "Properties": "The properties of the Mainframe Modernization managed application action.", + "Resource": "The resource of the Mainframe Modernization managed application action." }, "AWS::AppTest::TestCase M2NonManagedApplicationAction": { - "ActionType": "The action type of the AWS Mainframe Modernization non-managed application action.", - "Resource": "The resource of the AWS Mainframe Modernization non-managed application action." + "ActionType": "The action type of the Mainframe Modernization non-managed application action.", + "Resource": "The resource of the Mainframe Modernization non-managed application action." }, "AWS::AppTest::TestCase MainframeAction": { "ActionType": "The action type of the mainframe action.", @@ -3320,8 +3334,8 @@ }, "AWS::AppTest::TestCase ResourceAction": { "CloudFormationAction": "The CloudFormation action of the resource action.", - "M2ManagedApplicationAction": "The AWS Mainframe Modernization managed application action of the resource action.", - "M2NonManagedApplicationAction": "The AWS Mainframe Modernization non-managed application action of the resource action." + "M2ManagedApplicationAction": "The Mainframe Modernization managed application action of the resource action.", + "M2NonManagedApplicationAction": "The Mainframe Modernization non-managed application action of the resource action." }, "AWS::AppTest::TestCase Script": { "ScriptLocation": "The script location of the scripts.", @@ -3451,7 +3465,7 @@ "AttachMissingPermission": "If set to true, the managed policies for SSM and CW will be attached to the instance roles if they are missing.", "AutoConfigurationEnabled": "If set to `true` , the application components will be configured with the monitoring configuration recommended by Application Insights.", "CWEMonitorEnabled": "Indicates whether Application Insights can listen to CloudWatch events for the application resources, such as `instance terminated` , `failed deployment` , and others.", - "ComponentMonitoringSettings": "The monitoring settings of the components.", + "ComponentMonitoringSettings": "The monitoring settings of the components. Not required to set up default monitoring for all components. To set up default monitoring for all components, set `AutoConfigurationEnabled` to `true` .", "CustomComponents": "Describes a custom component by grouping similar standalone instances to monitor.", "GroupingType": "Application Insights can create applications based on a resource group or on an account. To create an account-based application using all of the resources in the account, set this parameter to `ACCOUNT_BASED` .", "LogPatternSets": "The log pattern sets.", @@ -3472,9 +3486,9 @@ "SubComponentTypeConfigurations": "Sub-component configurations of the component." }, "AWS::ApplicationInsights::Application ComponentMonitoringSetting": { - "ComponentARN": "The ARN of the component.", + "ComponentARN": "The ARN of the component. 
Either the component ARN or the component name is required.", "ComponentConfigurationMode": "Component monitoring can be configured in one of the following three modes:\n\n- `DEFAULT` : The component will be configured with the recommended default monitoring settings of the selected `Tier` .\n- `CUSTOM` : The component will be configured with the customized monitoring settings that are specified in `CustomComponentConfiguration` . If used, `CustomComponentConfiguration` must be provided.\n- `DEFAULT_WITH_OVERWRITE` : The component will be configured with the recommended default monitoring settings of the selected `Tier` , and merged with customized overwrite settings that are specified in `DefaultOverwriteComponentConfiguration` . If used, `DefaultOverwriteComponentConfiguration` must be provided.", - "ComponentName": "The name of the component.", + "ComponentName": "The name of the component. Either the component ARN or the component name is required.", "CustomComponentConfiguration": "Customized monitoring settings. Required if CUSTOM mode is configured in `ComponentConfigurationMode` .", "DefaultOverwriteComponentConfiguration": "Customized overwrite monitoring settings. Required if CUSTOM mode is configured in `ComponentConfigurationMode` .", "Tier": "The tier of the application component. Supported tiers include `DOT_NET_CORE` , `DOT_NET_WORKER` , `DOT_NET_WEB` , `SQL_SERVER` , `SQL_SERVER_ALWAYSON_AVAILABILITY_GROUP` , `SQL_SERVER_FAILOVER_CLUSTER_INSTANCE` , `MYSQL` , `POSTGRESQL` , `JAVA_JMX` , `ORACLE` , `SAP_HANA_MULTI_NODE` , `SAP_HANA_SINGLE_NODE` , `SAP_HANA_HIGH_AVAILABILITY` , `SHAREPOINT` , `ACTIVE_DIRECTORY` , and `DEFAULT` ." @@ -3563,7 +3577,8 @@ "Description": "An optional description for this SLO.", "Goal": "This structure contains the attributes that determine the goal of an SLO. This includes the time period for evaluation and the attainment threshold.", "Name": "A name for this SLO.", - "Sli": "A structure containing information about the performance metric that this SLO monitors.", + "RequestBasedSli": "A structure containing information about the performance metric that this SLO monitors, if this is a request-based SLO.", + "Sli": "A structure containing information about the performance metric that this SLO monitors, if this is a period-based SLO.", "Tags": "A list of key-value pairs to associate with the SLO. You can associate as many as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO, you must have the cloudwatch:TagResource permission.\n\nTags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values." }, "AWS::ApplicationSignals::ServiceLevelObjective CalendarInterval": { @@ -3576,7 +3591,7 @@ "Value": "The value of the dimension. Dimension values must contain only ASCII characters and must include at least one non-whitespace character. ASCII control characters are not supported as part of dimension values." }, "AWS::ApplicationSignals::ServiceLevelObjective Goal": { - "AttainmentGoal": "The threshold that determines if the goal is being met. An *attainment goal* is the ratio of good periods that meet the threshold requirements to the total periods within the interval.
For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state.\n\nIf you omit this parameter, 99 is used to represent 99% as the attainment goal.", + "AttainmentGoal": "The threshold that determines if the goal is being met.\n\nIf this is a period-based SLO, the attainment goal is the ratio, expressed as a percentage, of good periods that meet the threshold requirements to the total periods within the interval. For example, an attainment goal of 99.9% means that within your interval, you are targeting 99.9% of the periods to be in healthy state.\n\nIf this is a request-based SLO, the attainment goal is the percentage of requests that must be successful for the goal to be met.\n\nIf you omit this parameter, 99 is used to represent 99% as the attainment goal.", "Interval": "The time period used to evaluate the SLO. It can be either a calendar interval or rolling interval.\n\nIf you omit this parameter, a rolling interval of 7 days is used.", "WarningThreshold": "The percentage of remaining budget over total budget that you want to get warnings for. If you omit this parameter, the default of 50.0 is used." }, @@ -3602,6 +3617,22 @@ "Stat": "The statistic to use for comparison to the threshold. It can be any CloudWatch statistic or extended statistic. For more information about statistics, see [CloudWatch statistics definitions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/Statistics-definitions.html) .", "Unit": "If you omit `Unit` then all data that was collected with any unit is returned, along with the corresponding units that were specified when the data was reported to CloudWatch. If you specify a unit, the operation returns only data that was collected with that unit specified. If you specify a unit that does not match the data collected, the results of the operation are null. CloudWatch does not perform unit conversions." }, + "AWS::ApplicationSignals::ServiceLevelObjective MonitoredRequestCountMetric": { + "BadCountMetric": "If you want to count \"bad requests\" to determine the percentage of successful requests for this request-based SLO, specify the metric to use as \"bad requests\" in this structure.", + "GoodCountMetric": "If you want to count \"good requests\" to determine the percentage of successful requests for this request-based SLO, specify the metric to use as \"good requests\" in this structure." + }, + "AWS::ApplicationSignals::ServiceLevelObjective RequestBasedSli": { + "ComparisonOperator": "The arithmetic operation used when comparing the specified metric to the threshold.", + "MetricThreshold": "This value is the threshold that the observed metric values of the SLI metric are compared to.", + "RequestBasedSliMetric": "A structure that contains information about the metric that the SLO monitors." + }, + "AWS::ApplicationSignals::ServiceLevelObjective RequestBasedSliMetric": { + "KeyAttributes": "This is a string-to-string map that contains information about the type of object that this SLO is related to. It can include the following fields.\n\n- `Type` designates the type of object that this SLO is related to.\n- `ResourceType` specifies the type of the resource. This field is used only when the value of the `Type` field is `Resource` or `AWS::Resource` .\n- `Name` specifies the name of the object. This is used only if the value of the `Type` field is `Service` , `RemoteService` , or `AWS::Service` .\n- `Identifier` identifies the resource objects of this resource.
This is used only if the value of the `Type` field is `Resource` or `AWS::Resource` .\n- `Environment` specifies the location where this object is hosted, or what it belongs to.", + "MetricType": "If the SLO monitors either the `LATENCY` or `AVAILABILITY` metric that Application Signals collects, this field displays which of those metrics is used.", + "MonitoredRequestCountMetric": "Use this structure to define the metric that you want to use as the \"good request\" or \"bad request\" value for a request-based SLO. The value observed for this metric will be divided by the number found for the metric defined in `TotalRequestCountMetric` to determine the percentage of successful requests that this SLO tracks.", + "OperationName": "If the SLO monitors a specific operation of the service, this field displays that operation name.", + "TotalRequestCountMetric": "This structure defines the metric that is used as the \"total requests\" number for a request-based SLO. The number of \"good requests\" or \"bad requests\" that is observed for the metric defined in `MonitoredRequestCountMetric` is divided by the number observed for this metric to determine the percentage of successful requests that this SLO tracks." + }, "AWS::ApplicationSignals::ServiceLevelObjective RollingInterval": { "Duration": "Specifies the duration of each rolling interval. For example, if `Duration` is `7` and `DurationUnit` is `DAY` , each rolling interval is seven days.", "DurationUnit": "Specifies the rolling interval unit." @@ -3672,7 +3703,7 @@ "WorkGroupConfiguration": "The configuration of the workgroup, which includes the location in Amazon S3 where query results are stored, the encryption option, if any, used for query results, whether Amazon CloudWatch Metrics are enabled for the workgroup, and the limit for the amount of bytes scanned (cutoff) per query, if it is specified. The `EnforceWorkGroupConfiguration` option determines whether workgroup settings override client-side query settings." }, "AWS::Athena::WorkGroup AclConfiguration": { - "S3AclOption": "The Amazon S3 canned ACL that Athena should specify when storing query results. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* ." + "S3AclOption": "The Amazon S3 canned ACL that Athena should specify when storing query results, including data files inserted by Athena as the result of statements like CTAS or INSERT INTO. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* ." }, "AWS::Athena::WorkGroup CustomerContentEncryptionConfiguration": { "KmsKey": "The customer managed KMS key that is used to encrypt the user's data stores in Athena."
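The request-based SLO entries documented in the hunk above fit together more clearly in template form. Below is a minimal sketch, not taken from this PR: the resource name, service key attributes, operation name, and threshold values are hypothetical, and only the property names come from the `RequestBasedSli` , `RequestBasedSliMetric` , and `Goal` definitions above. It encodes "99.9% of requests to one operation must complete within 200 ms":

```yaml
# Hypothetical request-based latency SLO. A request is "good" when its
# latency is <= MetricThreshold; good requests divided by total requests
# must reach the AttainmentGoal percentage within each interval.
Resources:
  CheckoutLatencySLO:
    Type: AWS::ApplicationSignals::ServiceLevelObjective
    Properties:
      Name: checkout-latency # hypothetical
      RequestBasedSli:
        RequestBasedSliMetric:
          KeyAttributes: # identifies the monitored service
            Type: Service
            Name: checkout-service # hypothetical
            Environment: eks:prod # hypothetical
          MetricType: LATENCY # latency metric collected by Application Signals
          OperationName: "POST /checkout" # hypothetical
        MetricThreshold: 200
        ComparisonOperator: LessThanOrEqualTo
      Goal:
        AttainmentGoal: 99.9 # percentage of requests that must be "good"
        WarningThreshold: 50.0
```

Because `MetricType` is `LATENCY` here, Application Signals supplies the request counts itself, so `MonitoredRequestCountMetric` and `TotalRequestCountMetric` are omitted; the good/total division described above is what turns the per-request threshold into an attainment percentage.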
@@ -3687,9 +3718,9 @@ }, "AWS::Athena::WorkGroup ResultConfiguration": { "AclConfiguration": "Indicates that an Amazon S3 canned ACL should be set to control ownership of stored query results. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . This is a client-side setting. If workgroup settings override client-side settings, then the query uses the ACL configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` .", - "EncryptionConfiguration": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "EncryptionConfiguration": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "ExpectedBucketOwner": "The account ID that you expect to be the owner of the Amazon S3 bucket specified by `ResultConfiguration:OutputLocation` . If set, Athena uses the value for `ExpectedBucketOwner` when it makes Amazon S3 calls to your specified output location. If the `ExpectedBucketOwner` account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.\n\nThis is a client-side setting. If workgroup settings override client-side settings, then the query uses the `ExpectedBucketOwner` setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` .", - "OutputLocation": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` ." + "OutputLocation": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. 
For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` ." }, "AWS::Athena::WorkGroup ResultConfigurationUpdates": { "AclConfiguration": "The ACL configuration for the query results.", @@ -3697,9 +3728,9 @@ "ExpectedBucketOwner": "The AWS account ID that you expect to be the owner of the Amazon S3 bucket specified by `ResultConfiguration$OutputLocation` . If set, Athena uses the value for `ExpectedBucketOwner` when it makes Amazon S3 calls to your specified output location. If the `ExpectedBucketOwner` AWS account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.\n\nIf workgroup settings override client-side settings, then the query uses the `ExpectedBucketOwner` setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `WorkGroupConfiguration$EnforceWorkGroupConfiguration` and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "OutputLocation": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . For more information, see [Query Results](https://docs.aws.amazon.com/athena/latest/ug/querying.html) If workgroup settings override client-side settings, then the query uses the location for the query results and the encryption configuration that are specified for the workgroup. The \"workgroup settings override\" is specified in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. See `EnforceWorkGroupConfiguration` .", "RemoveAclConfiguration": "If set to `true` , indicates that the previously-specified ACL configuration for queries in this workgroup should be ignored and set to null. If set to `false` or not set, and a value is present in the `AclConfiguration` of `ResultConfigurationUpdates` , the `AclConfiguration` in the workgroup's `ResultConfiguration` is updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", - "RemoveEncryptionConfiguration": "If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "RemoveEncryptionConfiguration": "If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. 
For more information, see [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "RemoveExpectedBucketOwner": "If set to \"true\", removes the AWS account ID previously specified for `ResultConfiguration$ExpectedBucketOwner` . If set to \"false\" or not set, and a value is present in the `ExpectedBucketOwner` in `ResultConfigurationUpdates` (the client-side setting), the `ExpectedBucketOwner` in the workgroup's `ResultConfiguration` is updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", - "RemoveOutputLocation": "If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) ." + "RemoveOutputLocation": "If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) ." }, "AWS::Athena::WorkGroup Tag": { "Key": "A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. You can use letters and numbers representable in UTF-8, and the following characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique per resource.", @@ -3709,12 +3740,12 @@ "AdditionalConfiguration": "Specifies a user defined JSON string that is passed to the session engine.", "BytesScannedCutoffPerQuery": "The upper limit (cutoff) for the amount of bytes a single query in a workgroup is allowed to scan. No default is defined.\n\n> This property currently supports integer types. Support for long values is planned.", "CustomerContentEncryptionConfiguration": "Specifies the KMS key that is used to encrypt the user's data stores in Athena. This setting does not apply to Athena SQL workgroups.", - "EnforceWorkGroupConfiguration": "If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "EnforceWorkGroupConfiguration": "If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "EngineVersion": "The engine version that all queries running on the workgroup use.", "ExecutionRole": "Role used to access user resources in an Athena for Apache Spark session. 
This property applies only to Spark-enabled workgroups in Athena.", "PublishCloudWatchMetricsEnabled": "Indicates that the Amazon CloudWatch metrics are enabled for the workgroup.", "RequesterPaysEnabled": "If set to `true` , allows members assigned to a workgroup to reference Amazon S3 Requester Pays buckets in queries. If set to `false` , workgroup members cannot query data from Requester Pays buckets, and queries that retrieve data from Requester Pays buckets cause an error. The default is `false` . For more information about Requester Pays buckets, see [Requester Pays Buckets](https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) in the *Amazon Simple Storage Service Developer Guide* .", - "ResultConfiguration": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) ." + "ResultConfiguration": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) ." }, "AWS::AuditManager::Assessment": { "AssessmentReportsDestination": "The destination that evidence reports are stored in for the assessment.", @@ -3775,7 +3806,7 @@ "DesiredCapacity": "The desired capacity is the initial capacity of the Auto Scaling group at the time of its creation and the capacity it attempts to maintain. It can scale beyond this capacity if you configure automatic scaling.\n\nThe number must be greater than or equal to the minimum size of the group and less than or equal to the maximum size of the group. If you do not specify a desired capacity when creating the stack, the default is the minimum size of the group.\n\nCloudFormation marks the Auto Scaling group as successful (by setting its status to CREATE_COMPLETE) when the desired capacity is reached. However, if a maximum Spot price is set in the launch template or launch configuration that you specified, then desired capacity is not used as a criteria for success. Whether your request is fulfilled depends on Spot Instance capacity and your maximum price.", "DesiredCapacityType": "The unit of measurement for the value specified for desired capacity. Amazon EC2 Auto Scaling supports `DesiredCapacityType` for attribute-based instance type selection only. For more information, see [Create a mixed instances group using attribute-based instance type selection](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-mixed-instances-group-attribute-based-instance-type-selection.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nBy default, Amazon EC2 Auto Scaling specifies `units` , which translates into number of instances.\n\nValid values: `units` | `vcpu` | `memory-mib`", "HealthCheckGracePeriod": "The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before checking the health status of an EC2 instance that has come into service and marking it unhealthy due to a failed health check. This is useful if your instances do not immediately pass their health checks after they enter the `InService` state. 
For more information, see [Set the health check grace period for an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/health-check-grace-period.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nDefault: `0` seconds", - "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "HealthCheckType": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "InstanceId": "The ID of the instance used to base the launch configuration on. For more information, see [Create an Auto Scaling group using an EC2 instance](https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nIf you specify `LaunchTemplate` , `MixedInstancesPolicy` , or `LaunchConfigurationName` , don't specify `InstanceId` .", "InstanceMaintenancePolicy": "An instance maintenance policy. For more information, see [Set instance maintenance policy](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-instance-maintenance-policy.html) in the *Amazon EC2 Auto Scaling User Guide* .", "LaunchConfigurationName": "The name of the launch configuration to use to launch instances.\n\nRequired only if you don't specify `LaunchTemplate` , `MixedInstancesPolicy` , or `InstanceId` .", @@ -4162,13 +4193,14 @@ "Edi": "An EDI (electronic data interchange) configuration object." }, "AWS::B2BI::Capability EdiConfiguration": { + "CapabilityDirection": "Specifies whether this capability is used for inbound or outbound transformations.", "InputLocation": "Contains the Amazon S3 bucket and prefix for the location of the input file, which is contained in an `S3Location` object.", "OutputLocation": "Contains the Amazon S3 bucket and prefix for the location of the output file, which is contained in an `S3Location` object.", "TransformerId": "Returns the system-assigned unique identifier for the transformer.", "Type": "Returns the type of the capability. Currently, only `edi` is supported." }, "AWS::B2BI::Capability EdiType": { - "X12Details": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents." 
+ "X12Details": "" }, "AWS::B2BI::Capability S3Location": { "BucketName": "Specifies the name of the Amazon S3 bucket.", @@ -4184,16 +4216,51 @@ }, "AWS::B2BI::Partnership": { "Capabilities": "Returns one or more capabilities associated with this partnership.", + "CapabilityOptions": "Contains the details for an Outbound EDI capability.", "Email": "", "Name": "Returns the name of the partnership.", "Phone": "", "ProfileId": "Returns the unique, system-generated identifier for the profile connected to this partnership.", "Tags": "A key-value pair for a specific partnership. Tags are metadata that you can use to search for and group capabilities for various purposes." }, + "AWS::B2BI::Partnership CapabilityOptions": { + "OutboundEdi": "A structure that contains the outbound EDI options." + }, + "AWS::B2BI::Partnership OutboundEdiOptions": { + "X12": "A structure that contains an X12 envelope structure." + }, "AWS::B2BI::Partnership Tag": { "Key": "Specifies the name assigned to the tag that you create.", "Value": "Contains one or more values that you assigned to the key name that you create." }, + "AWS::B2BI::Partnership X12Delimiters": { + "ComponentSeparator": "", + "DataElementSeparator": "", + "SegmentTerminator": "" + }, + "AWS::B2BI::Partnership X12Envelope": { + "Common": "A container for the X12 outbound EDI headers." + }, + "AWS::B2BI::Partnership X12FunctionalGroupHeaders": { + "ApplicationReceiverCode": "", + "ApplicationSenderCode": "", + "ResponsibleAgencyCode": "" + }, + "AWS::B2BI::Partnership X12InterchangeControlHeaders": { + "AcknowledgmentRequestedCode": "", + "ReceiverId": "", + "ReceiverIdQualifier": "", + "RepetitionSeparator": "", + "SenderId": "", + "SenderIdQualifier": "", + "UsageIndicatorCode": "" + }, + "AWS::B2BI::Partnership X12OutboundEdiHeaders": { + "Delimiters": "The delimiters, for example semicolon ( `;` ), that separates sections of the headers for the X12 object.", + "FunctionalGroupHeaders": "The functional group headers for the X12 object.", + "InterchangeControlHeaders": "In X12 EDI messages, delimiters are used to mark the end of segments or elements, and are defined in the interchange control header.", + "ValidateEdi": "Specifies whether or not to validate the EDI for this X12 object: `TRUE` or `FALSE` ." + }, "AWS::B2BI::Profile": { "BusinessName": "Returns the name for the business associated with this profile.", "Email": "", @@ -4207,16 +4274,36 @@ "Value": "Contains one or more values that you assigned to the key name that you create." }, "AWS::B2BI::Transformer": { - "EdiType": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. 
X12 is a set of standards and corresponding messages that define specific business documents.", - "FileFormat": "Returns that the currently supported file formats for EDI transformations are `JSON` and `XML` .", - "MappingTemplate": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", + "InputConversion": "Returns a structure that contains the format options for the transformation.", + "Mapping": "Returns the structure that contains the mapping template and its language (either XSLT or JSONATA).", "Name": "Returns the descriptive name for the transformer.", - "SampleDocument": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", + "OutputConversion": "Returns the `OutputConversion` object, which contains the format options for the outbound transformation.", + "SampleDocuments": "Returns a structure that contains the Amazon S3 bucket and an array of the corresponding keys used to identify the location for your sample documents.", "Status": "Returns the state of the newly created transformer. The transformer can be either `active` or `inactive` . For the transformer to be used in a capability, its status must `active` .", "Tags": "A key-value pair for a specific transformer. Tags are metadata that you can use to search for and group capabilities for various purposes." }, - "AWS::B2BI::Transformer EdiType": { - "X12Details": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents." + "AWS::B2BI::Transformer FormatOptions": { + "X12": "" + }, + "AWS::B2BI::Transformer InputConversion": { + "FormatOptions": "", + "FromFormat": "" + }, + "AWS::B2BI::Transformer Mapping": { + "Template": "", + "TemplateLanguage": "" + }, + "AWS::B2BI::Transformer OutputConversion": { + "FormatOptions": "", + "ToFormat": "" + }, + "AWS::B2BI::Transformer SampleDocumentKeys": { + "Input": "", + "Output": "" + }, + "AWS::B2BI::Transformer SampleDocuments": { + "BucketName": "", + "Keys": "" }, "AWS::B2BI::Transformer Tag": { "Key": "Specifies the name assigned to the tag that you create.", @@ -4327,7 +4414,7 @@ }, "AWS::Backup::BackupVault": { "AccessPolicy": "A resource-based policy that is used to manage access permissions on the target backup vault.", - "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "BackupVaultName": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "BackupVaultTags": "The tags to assign to the backup vault.", "EncryptionKeyArn": "A server-side encryption key you can specify to encrypt your backups from services that support full AWS Backup management; for example, `arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab` . If you specify a key, you must specify its ARN, not its alias. 
If you do not specify a key, AWS Backup creates a KMS key for you by default.\n\nTo learn which AWS Backup services support full AWS Backup management and how AWS Backup handles encryption for backups from services that do not yet support full AWS Backup , see [Encryption for backups in AWS Backup](https://docs.aws.amazon.com/aws-backup/latest/devguide/encryption.html)", "LockConfiguration": "Configuration for [AWS Backup Vault Lock](https://docs.aws.amazon.com/aws-backup/latest/devguide/vault-lock.html) .", @@ -4394,6 +4481,7 @@ "RestoreTestingPlanName": "The RestoreTestingPlanName is a unique string that is the name of the restore testing plan. This cannot be changed after creation, and it must consist of only alphanumeric characters and underscores.", "ScheduleExpression": "A CRON expression in specified timezone when a restore testing plan is executed.", "ScheduleExpressionTimezone": "Optional. This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", + "ScheduleStatus": "", "StartWindowHours": "Defaults to 24 hours.\n\nA value in hours after a restore test is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, this parameter has a maximum value of 168 hours (one week).", "Tags": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters,numbers, spaces, and the following characters: `+ - = . _ : /.`" }, @@ -4442,6 +4530,7 @@ "AWS::Batch::ComputeEnvironment": { "ComputeEnvironmentName": "The name for your compute environment. It can be up to 128 characters long. It can contain uppercase and lowercase letters, numbers, hyphens (-), and underscores (_).", "ComputeResources": "The ComputeResources property type specifies details of the compute resources managed by the compute environment. This parameter is required for managed compute environments. For more information, see [Compute Environments](https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html) in the ** .", + "Context": "Reserved.", "EksConfiguration": "The details for the Amazon EKS cluster that supports the compute environment.", "ReplaceComputeEnvironment": "Specifies whether the compute environment is replaced if an update is made that requires replacing the instances in the compute environment. The default value is `true` . To enable more properties to be updated, set this property to `false` . When changing the value of this property to `false` , do not change any other properties at the same time. If other properties are changed at the same time, and the change needs to be rolled back but it can't, it's possible for the stack to go into the `UPDATE_ROLLBACK_FAILED` state. You can't update a stack that is in the `UPDATE_ROLLBACK_FAILED` state. However, if you can continue to roll it back, you can return the stack to its original settings and then try to update it again. 
For more information, see [Continue rolling back an update](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-continueupdaterollback.html) in the *AWS CloudFormation User Guide* .\n\nThe properties that can't be changed without replacing the compute environment are in the [`ComputeResources`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html) property type: [`AllocationStrategy`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-allocationstrategy) , [`BidPercentage`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-bidpercentage) , [`Ec2Configuration`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2configuration) , [`Ec2KeyPair`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair) , [`Ec2KeyPair`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-ec2keypair) , [`ImageId`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-imageid) , [`InstanceRole`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancerole) , [`InstanceTypes`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-instancetypes) , [`LaunchTemplate`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-launchtemplate) , [`MaxvCpus`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-maxvcpus) , [`MinvCpus`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-minvcpus) , [`PlacementGroup`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-placementgroup) , [`SecurityGroupIds`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-securitygroupids) , [`Subnets`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-subnets) , [Tags](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-tags) , 
[`Type`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-type) , and [`UpdateToLatestImageVersion`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-computeenvironment-computeresources.html#cfn-batch-computeenvironment-computeresources-updatetolatestimageversion) .", "ServiceRole": "The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS services on your behalf. For more information, see [AWS Batch service IAM role](https://docs.aws.amazon.com/batch/latest/userguide/service_IAM_role.html) in the *AWS Batch User Guide* .\n\n> If your account already created the AWS Batch service-linked role, that role is used by default for your compute environment unless you specify a different role here. If the AWS Batch service-linked role doesn't exist in your account, and no role is specified here, the service attempts to create the AWS Batch service-linked role in your account. \n\nIf your specified role has a path other than `/` , then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name `bar` has a path of `/foo/` , specify `/foo/bar` as the role name. For more information, see [Friendly names and paths](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) in the *IAM User Guide* .\n\n> Depending on how you created your AWS Batch service role, its ARN might contain the `service-role` path prefix. When you only specify the name of the service role, AWS Batch assumes that your ARN doesn't use the `service-role` path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.", @@ -4539,7 +4628,7 @@ "Permissions": "The explicit permissions to provide to the container for the device. By default, the container has permissions for `read` , `write` , and `mknod` for the device." }, "AWS::Batch::JobDefinition EcsProperties": { - "TaskProperties": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element." + "TaskProperties": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one task element. However, the task element can run up to 10 containers." }, "AWS::Batch::JobDefinition EcsTaskProperties": { "Containers": "This object is a list of containers.", @@ -4663,15 +4752,16 @@ "AWS::Batch::JobDefinition NodeRangeProperty": { "Container": "The container details for the node range.", "EcsProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", + "EksProperties": "This is an object that represents the properties of the node range for a multi-node parallel job.", "InstanceTypes": "The instance types of the underlying host infrastructure of a multi-node parallel job.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources.\n> \n> In addition, this list object is currently limited to one element.", "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. 
If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." }, "AWS::Batch::JobDefinition PodProperties": { - "Containers": "The properties of the container that's used on the Amazon EKS pod.", + "Containers": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", "ImagePullSecrets": "", - "InitContainers": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "InitContainers": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", "ServiceAccountName": "The name of the service account that's used to run the pod. 
For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", "ShareProcessNamespace": "Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see [Share Process Namespace between Containers in a Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) .", @@ -4845,7 +4935,7 @@ "PromptType": "The step in the agent sequence that this prompt configuration applies to." }, "AWS::Bedrock::Agent PromptOverrideConfiguration": { - "OverrideLambda": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Agents for Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", + "OverrideLambda": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Amazon Bedrock Agents](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", "PromptConfigurations": "Contains configurations to override a prompt template in one part of an agent sequence. For more information, see [Advanced prompts](https://docs.aws.amazon.com/bedrock/latest/userguide/advanced-prompts.html) ." }, "AWS::Bedrock::Agent S3Identifier": { @@ -4876,33 +4966,447 @@ "ServerSideEncryptionConfiguration": "Contains details about the configuration of the server-side encryption.", "VectorIngestionConfiguration": "Contains details about how to ingest the documents in the data source." }, + "AWS::Bedrock::DataSource BedrockFoundationModelConfiguration": { + "ModelArn": "The ARN of the foundation model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) .", + "ParsingPrompt": "Instructions for interpreting the contents of a document." + }, "AWS::Bedrock::DataSource ChunkingConfiguration": { "ChunkingStrategy": "Knowledge base can split your source data into chunks. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried. You have the following options for chunking your data. 
If you opt for `NONE` , then you may want to pre-process your files by splitting them up such that each file corresponds to a chunk.\n\n- `FIXED_SIZE` \u2013 Amazon Bedrock splits your source data into chunks of the approximate size that you set in the `fixedSizeChunkingConfiguration` .\n- `HIERARCHICAL` \u2013 Split documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.\n- `SEMANTIC` \u2013 Split documents into chunks based on groups of similar content derived with natural language processing.\n- `NONE` \u2013 Amazon Bedrock treats each file as one chunk. If you choose this option, you may want to pre-process your documents by splitting them into separate files.", - "FixedSizeChunkingConfiguration": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field." + "FixedSizeChunkingConfiguration": "Configurations for when you choose fixed-size chunking. If you set the `chunkingStrategy` as `NONE` , exclude this field.", + "HierarchicalChunkingConfiguration": "Settings for hierarchical document chunking for a data source. Hierarchical chunking splits documents into layers of chunks where the first layer contains large chunks, and the second layer contains smaller chunks derived from the first layer.", + "SemanticChunkingConfiguration": "Settings for semantic document chunking for a data source. Semantic chunking splits a document into smaller documents based on groups of similar content derived from the text with natural language processing." + }, + "AWS::Bedrock::DataSource ConfluenceCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the Confluence content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource ConfluenceDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the Confluence content. For example, configuring specific types of Confluence content.", + "SourceConfiguration": "The endpoint information to connect to your Confluence data source." + }, + "AWS::Bedrock::DataSource ConfluenceSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your Confluence instance.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your Confluence instance URL. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [Confluence connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/confluence-data-source-connector.html#configuration-confluence-connector) .", + "HostType": "The supported host type, whether online/cloud or server/on-premises.", + "HostUrl": "The Confluence host URL or instance URL." + }, + "AWS::Bedrock::DataSource CrawlFilterConfiguration": { + "PatternObjectFilter": "The configuration of filtering certain objects or content types of the data source.", + "Type": "The type of filtering that you want to apply to certain objects or content of the data source. For example, the `PATTERN` type is regular expression patterns you can apply to filter your content." + }, + "AWS::Bedrock::DataSource CustomTransformationConfiguration": { + "IntermediateStorage": "An S3 bucket path for input and output objects.", + "Transformations": "A Lambda function that processes documents." 
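The `CustomTransformationConfiguration` structures documented above nest as in this sketch; the bucket path, Lambda ARN, and the `POST_CHUNKING` step value are assumptions for illustration:

```yaml
CustomTransformationConfiguration:
  IntermediateStorage:
    S3Location:
      URI: s3://example-bucket/chunk-processor/   # staging area for the transform's input and output objects
  Transformations:
    - StepToApply: POST_CHUNKING                  # when the service applies the Lambda transformation (assumed value)
      TransformationFunction:
        TransformationLambdaConfiguration:
          LambdaArn: arn:aws:lambda:us-east-1:111122223333:function:chunk-postprocessor
```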
}, "AWS::Bedrock::DataSource DataSourceConfiguration": { + "ConfluenceConfiguration": "The configuration information to connect to Confluence as your data source.\n\n> Confluence data source connector is in preview release and is subject to change.", "S3Configuration": "The configuration information to connect to Amazon S3 as your data source.", - "Type": "The type of data source." + "SalesforceConfiguration": "The configuration information to connect to Salesforce as your data source.\n\n> Salesforce data source connector is in preview release and is subject to change.", + "SharePointConfiguration": "The configuration information to connect to SharePoint as your data source.\n\n> SharePoint data source connector is in preview release and is subject to change.", + "Type": "The type of data source.", + "WebConfiguration": "The configuration of web URLs to crawl for your data source. You should be authorized to crawl the URLs.\n\n> Crawling web URLs as your data source is in preview release and is subject to change." }, "AWS::Bedrock::DataSource FixedSizeChunkingConfiguration": { "MaxTokens": "The maximum number of tokens to include in a chunk.", "OverlapPercentage": "The percentage of overlap between adjacent chunks of a data source." }, + "AWS::Bedrock::DataSource HierarchicalChunkingConfiguration": { + "LevelConfigurations": "Token settings for each layer.", + "OverlapTokens": "The number of tokens to repeat across chunks in the same layer." + }, + "AWS::Bedrock::DataSource HierarchicalChunkingLevelConfiguration": { + "MaxTokens": "The maximum number of tokens that a chunk can contain in this layer." + }, + "AWS::Bedrock::DataSource IntermediateStorage": { + "S3Location": "An S3 bucket path." + }, + "AWS::Bedrock::DataSource ParsingConfiguration": { + "BedrockFoundationModelConfiguration": "Settings for a foundation model used to parse documents for a data source.", + "ParsingStrategy": "The parsing strategy for the data source." + }, + "AWS::Bedrock::DataSource ParsingPrompt": { + "ParsingPromptText": "Instructions for interpreting the contents of a document." + }, + "AWS::Bedrock::DataSource PatternObjectFilter": { + "ExclusionFilters": "A list of one or more exclusion regular expression patterns to exclude certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn\u2019t crawled.", + "InclusionFilters": "A list of one or more inclusion regular expression patterns to include certain object types that adhere to the pattern. If you specify an inclusion and exclusion filter/pattern and both match a document, the exclusion filter takes precedence and the document isn\u2019t crawled.", + "ObjectType": "The supported object type or content type of the data source." + }, + "AWS::Bedrock::DataSource PatternObjectFilterConfiguration": { + "Filters": "The configuration of specific filters applied to your data source content. You can filter out or include certain content." + }, "AWS::Bedrock::DataSource S3DataSourceConfiguration": { "BucketArn": "The Amazon Resource Name (ARN) of the S3 bucket that contains your data.", "BucketOwnerAccountId": "The account ID for the owner of the S3 bucket.", "InclusionPrefixes": "A list of S3 prefixes to include certain files or content. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) ." 
}, + "AWS::Bedrock::DataSource S3Location": { + "URI": "The location's URI. For example, `s3://my-bucket/chunk-processor/` ." + }, + "AWS::Bedrock::DataSource SalesforceCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the Salesforce content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource SalesforceDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the Salesforce content. For example, configuring specific types of Salesforce content.", + "SourceConfiguration": "The endpoint information to connect to your Salesforce data source." + }, + "AWS::Bedrock::DataSource SalesforceSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your Salesforce instance.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your Salesforce instance URL. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [Salesforce connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/salesforce-data-source-connector.html#configuration-salesforce-connector) .", + "HostUrl": "The Salesforce host URL or instance URL." + }, + "AWS::Bedrock::DataSource SeedUrl": { + "Url": "A seed or starting point URL." + }, + "AWS::Bedrock::DataSource SemanticChunkingConfiguration": { + "BreakpointPercentileThreshold": "The dissimilarity threshold for splitting chunks.", + "BufferSize": "The buffer size.", + "MaxTokens": "The maximum number of tokens that a chunk can contain." + }, "AWS::Bedrock::DataSource ServerSideEncryptionConfiguration": { "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key used to encrypt the resource." }, + "AWS::Bedrock::DataSource SharePointCrawlerConfiguration": { + "FilterConfiguration": "The configuration of filtering the SharePoint content. For example, configuring regular expression patterns to include or exclude certain content." + }, + "AWS::Bedrock::DataSource SharePointDataSourceConfiguration": { + "CrawlerConfiguration": "The configuration of the SharePoint content. For example, configuring specific types of SharePoint content.", + "SourceConfiguration": "The endpoint information to connect to your SharePoint data source." + }, + "AWS::Bedrock::DataSource SharePointSourceConfiguration": { + "AuthType": "The supported authentication type to authenticate and connect to your SharePoint site/sites.", + "CredentialsSecretArn": "The Amazon Resource Name of an AWS Secrets Manager secret that stores your authentication credentials for your SharePoint site/sites. For more information on the key-value pairs that must be included in your secret, depending on your authentication type, see [SharePoint connection configuration](https://docs.aws.amazon.com/bedrock/latest/userguide/sharepoint-data-source-connector.html#configuration-sharepoint-connector) .", + "Domain": "The domain of your SharePoint instance or site URL/URLs.", + "HostType": "The supported host type, whether online/cloud or server/on-premises.", + "SiteUrls": "A list of one or more SharePoint site URLs.", + "TenantId": "The identifier of your Microsoft 365 tenant." + }, + "AWS::Bedrock::DataSource Transformation": { + "StepToApply": "When the service applies the transformation.", + "TransformationFunction": "A Lambda function that processes documents." 
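For the SharePoint connector fields documented above, a hedged sketch; every identifier is a placeholder, and `ONLINE`, `OAUTH2_CLIENT_CREDENTIALS`, and the `Page` object type are assumed enum values:

```yaml
DataSourceConfiguration:
  Type: SHAREPOINT
  SharePointConfiguration:
    SourceConfiguration:
      HostType: ONLINE
      AuthType: OAUTH2_CLIENT_CREDENTIALS
      CredentialsSecretArn: arn:aws:secretsmanager:us-east-1:111122223333:secret:sp-credentials
      Domain: exampletenant
      TenantId: 11111111-2222-3333-4444-555555555555
      SiteUrls:
        - https://exampletenant.sharepoint.com/sites/knowledge
    CrawlerConfiguration:
      FilterConfiguration:
        Type: PATTERN
        PatternObjectFilter:
          Filters:
            - ObjectType: Page
              ExclusionFilters:
                - ".*archive.*"   # exclusion wins when inclusion and exclusion both match
```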
+ }, + "AWS::Bedrock::DataSource TransformationFunction": { + "TransformationLambdaConfiguration": "The Lambda function." + }, + "AWS::Bedrock::DataSource TransformationLambdaConfiguration": { + "LambdaArn": "The function's ARN identifier." + }, + "AWS::Bedrock::DataSource UrlConfiguration": { + "SeedUrls": "One or more seed or starting point URLs." + }, "AWS::Bedrock::DataSource VectorIngestionConfiguration": { - "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried." + "ChunkingConfiguration": "Details about how to chunk the documents in the data source. A *chunk* refers to an excerpt from a data source that is returned when the knowledge base that it belongs to is queried.", + "CustomTransformationConfiguration": "A custom document transformer for parsed data source documents.", + "ParsingConfiguration": "A custom parser for data source documents." + }, + "AWS::Bedrock::DataSource WebCrawlerConfiguration": { + "CrawlerLimits": "The configuration of crawl limits for the web URLs.", + "ExclusionFilters": "A list of one or more exclusion regular expression patterns to exclude certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn\u2019t crawled.", + "InclusionFilters": "A list of one or more inclusion regular expression patterns to include certain URLs. If you specify an inclusion and exclusion filter/pattern and both match a URL, the exclusion filter takes precedence and the web content of the URL isn\u2019t crawled.", + "Scope": "The scope of what is crawled for your URLs.\n\nYou can choose to crawl only web pages that belong to the same host or primary domain. For example, only web pages that contain the seed URL \"https://docs.aws.amazon.com/bedrock/latest/userguide/\" and no other domains. You can choose to include sub domains in addition to the host or primary domain. For example, web pages that contain \"aws.amazon.com\" can also include sub domain \"docs.aws.amazon.com\"." + }, + "AWS::Bedrock::DataSource WebCrawlerLimits": { + "RateLimit": "The max rate at which pages are crawled, up to 300 per minute per host." + }, + "AWS::Bedrock::DataSource WebDataSourceConfiguration": { + "CrawlerConfiguration": "The Web Crawler configuration details for the web data source.", + "SourceConfiguration": "The source configuration details for the web data source." + }, + "AWS::Bedrock::DataSource WebSourceConfiguration": { + "UrlConfiguration": "The configuration of the URL/URLs." + }, + "AWS::Bedrock::Flow": { + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the flow is encrypted with.", + "Definition": "The definition of the nodes and connections between the nodes in the flow.", + "DefinitionS3Location": "The Amazon S3 location of the flow definition.", + "DefinitionString": "The definition of the flow as a JSON-formatted string. The string must match the format in [FlowDefinition](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-bedrock-flow-flowdefinition.html) .", + "DefinitionSubstitutions": "A map that specifies the mappings for placeholder variables in the prompt flow definition. This enables the customer to inject values obtained at runtime. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map. 
Only supported with the `DefinitionString` and `DefinitionS3Location` fields.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .", + "Description": "A description of the flow.", + "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the service role with permissions to create a flow. For more information, see [Create a service role for flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-permissions.html) in the Amazon Bedrock User Guide.", + "Name": "The name of the flow.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "TestAliasTags": "" + }, + "AWS::Bedrock::Flow AgentFlowNodeConfiguration": { + "AgentAliasArn": "The Amazon Resource Name (ARN) of the alias of the agent to invoke." + }, + "AWS::Bedrock::Flow ConditionFlowNodeConfiguration": { + "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." + }, + "AWS::Bedrock::Flow FlowCondition": { + "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", + "Name": "A name for the condition that you can reference." + }, + "AWS::Bedrock::Flow FlowConditionalConnectionConfiguration": { + "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide." + }, + "AWS::Bedrock::Flow FlowConnection": { + "Configuration": "The configuration of the connection.", + "Name": "A name for the connection that you can reference.", + "Source": "The node that the connection starts at.", + "Target": "The node that the connection ends at.", + "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )." + }, + "AWS::Bedrock::Flow FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::Flow FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::Flow FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." + }, + "AWS::Bedrock::Flow FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. 
This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::Flow FlowNodeConfiguration": { + "Agent": "Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response.", + "Collector": "Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs.", + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "Iterator": "Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.\n\nThe output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node.", + "Retrieval": "Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output.", + "Storage": "Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location." + }, + "AWS::Bedrock::Flow FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::Flow FlowValidation": { + "Message": "A message describing the validation error." + }, + "AWS::Bedrock::Flow KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." 
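As a concrete node example, a knowledge base node that generates a response from query results might be declared as below under `Definition.Nodes` (the IDs, the `retrievalQuery` input name, and the `outputText` output name are assumptions):

```yaml
- Name: AnswerFromKB
  Type: KnowledgeBase              # must match the key used under Configuration
  Configuration:
    KnowledgeBase:
      KnowledgeBaseId: KB123EXAMPLE
      ModelId: anthropic.claude-3-sonnet-20240229-v1:0   # omit to return raw retrieval results instead
  Inputs:
    - Name: retrievalQuery
      Type: String
      Expression: $.data
  Outputs:
    - Name: outputText
      Type: String
```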
+ }, + "AWS::Bedrock::Flow LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." + }, + "AWS::Bedrock::Flow LexFlowNodeConfiguration": { + "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.", + "LocaleId": "The Region to invoke the Amazon Lex bot in." + }, + "AWS::Bedrock::Flow PromptFlowNodeConfiguration": { + "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." + }, + "AWS::Bedrock::Flow PromptFlowNodeInlineConfiguration": { + "InferenceConfiguration": "Contains inference configurations for the prompt.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with.", + "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.", + "TemplateType": "The type of prompt template." + }, + "AWS::Bedrock::Flow PromptFlowNodeResourceConfiguration": { + "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management." + }, + "AWS::Bedrock::Flow PromptFlowNodeSourceConfiguration": { + "Inline": "Contains configurations for a prompt that is defined inline.", + "Resource": "Contains configurations for a prompt from Prompt management." + }, + "AWS::Bedrock::Flow PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::Flow PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::Flow PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::Flow PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::Flow RetrievalFlowNodeConfiguration": { + "ServiceConfiguration": "Contains configurations for the service to use for retrieving data to return as the output from the node." + }, + "AWS::Bedrock::Flow RetrievalFlowNodeS3Configuration": { + "BucketName": "The name of the Amazon S3 bucket from which to retrieve data." + }, + "AWS::Bedrock::Flow RetrievalFlowNodeServiceConfiguration": { + "S3": "Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node." + }, + "AWS::Bedrock::Flow S3Location": { + "Bucket": "The S3 bucket containing the flow definition.", + "Key": "The object key for the S3 location containing the definition.", + "Version": "The Amazon S3 location from which to retrieve data for an S3 retrieve node or to which to store data for an S3 storage node." + }, + "AWS::Bedrock::Flow StorageFlowNodeConfiguration": { + "ServiceConfiguration": "Contains configurations for the service to use for storing the input into the node." + }, + "AWS::Bedrock::Flow StorageFlowNodeS3Configuration": { + "BucketName": "The name of the Amazon S3 bucket in which to store the input into the node." 
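The inline-prompt structures above nest as follows; a sketch of a prompt node under `Definition.Nodes`, with the model ID, template text, and stop sequence assumed for illustration:

```yaml
- Name: Summarize
  Type: Prompt
  Configuration:
    Prompt:
      SourceConfiguration:
        Inline:
          ModelId: anthropic.claude-3-haiku-20240307-v1:0
          TemplateType: TEXT
          TemplateConfiguration:
            Text:
              Text: "Summarize the following text: {{input_text}}"
              InputVariables:
                - Name: input_text
          InferenceConfiguration:
            Text:
              MaxTokens: 512
              Temperature: 0.2   # lower values give more predictable output
              TopP: 0.9
              StopSequences:
                - "###"
```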
+ }, + "AWS::Bedrock::Flow StorageFlowNodeServiceConfiguration": { + "S3": "Contains configurations for the Amazon S3 location in which to store the input into the node." + }, + "AWS::Bedrock::Flow TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." + }, + "AWS::Bedrock::FlowAlias": { + "Description": "A description of the alias.", + "FlowArn": "The Amazon Resource Name (ARN) of the alias.", + "Name": "The name of the alias.", + "RoutingConfiguration": "A list of configurations about the versions that the alias maps to. Currently, you can only specify one.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" + }, + "AWS::Bedrock::FlowAlias FlowAliasRoutingConfigurationListItem": { + "FlowVersion": "The version that the alias maps to." + }, + "AWS::Bedrock::FlowVersion": { + "Description": "The description of the flow version.", + "FlowArn": "The Amazon Resource Name (ARN) of the flow that the version belongs to." + }, + "AWS::Bedrock::FlowVersion AgentFlowNodeConfiguration": { + "AgentAliasArn": "The Amazon Resource Name (ARN) of the alias of the agent to invoke." + }, + "AWS::Bedrock::FlowVersion ConditionFlowNodeConfiguration": { + "Conditions": "An array of conditions. Each member contains the name of a condition and an expression that defines the condition." + }, + "AWS::Bedrock::FlowVersion FlowCondition": { + "Expression": "Defines the condition. You must refer to at least one of the inputs in the condition. For more information, expand the Condition node section in [Node types in prompt flows](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-how-it-works.html#flows-nodes) .", + "Name": "A name for the condition that you can reference." + }, + "AWS::Bedrock::FlowVersion FlowConditionalConnectionConfiguration": { + "Condition": "The condition that triggers this connection. For more information about how to write conditions, see the *Condition* node type in the [Node types](https://docs.aws.amazon.com/bedrock/latest/userguide/node-types.html) topic in the Amazon Bedrock User Guide." + }, + "AWS::Bedrock::FlowVersion FlowConnection": { + "Configuration": "The configuration of the connection.", + "Name": "A name for the connection that you can reference.", + "Source": "The node that the connection starts at.", + "Target": "The node that the connection ends at.", + "Type": "Whether the source node that the connection begins from is a condition node ( `Conditional` ) or not ( `Data` )." + }, + "AWS::Bedrock::FlowVersion FlowConnectionConfiguration": { + "Conditional": "The configuration of a connection originating from a Condition node.", + "Data": "The configuration of a connection originating from a node that isn't a Condition node." + }, + "AWS::Bedrock::FlowVersion FlowDataConnectionConfiguration": { + "SourceOutput": "The name of the output in the source node that the connection begins from.", + "TargetInput": "The name of the input in the target node that the connection ends at." + }, + "AWS::Bedrock::FlowVersion FlowDefinition": { + "Connections": "An array of connection definitions in the flow.", + "Nodes": "An array of node definitions in the flow." 
+ }, + "AWS::Bedrock::FlowVersion FlowNode": { + "Configuration": "Contains configurations for the node.", + "Inputs": "An array of objects, each of which contains information about an input into the node.", + "Name": "A name for the node.", + "Outputs": "A list of objects, each of which contains information about an output from the node.", + "Type": "The type of node. This value must match the name of the key that you provide in the configuration you provide in the `FlowNodeConfiguration` field." + }, + "AWS::Bedrock::FlowVersion FlowNodeConfiguration": { + "Agent": "Contains configurations for an agent node in your flow. Invokes an alias of an agent and returns the response.", + "Collector": "Contains configurations for a collector node in your flow. Collects an iteration of inputs and consolidates them into an array of outputs.", + "Condition": "Contains configurations for a Condition node in your flow. Defines conditions that lead to different branches of the flow.", + "Input": "Contains configurations for an input flow node in your flow. The first node in the flow. `inputs` can't be specified for this node.", + "Iterator": "Contains configurations for an iterator node in your flow. Takes an input that is an array and iteratively sends each item of the array as an output to the following node. The size of the array is also returned in the output.\n\nThe output flow node at the end of the flow iteration will return a response for each member of the array. To return only one response, you can include a collector node downstream from the iterator node.", + "KnowledgeBase": "Contains configurations for a knowledge base node in your flow. Queries a knowledge base and returns the retrieved results or generated response.", + "LambdaFunction": "Contains configurations for a Lambda function node in your flow. Invokes an AWS Lambda function.", + "Lex": "Contains configurations for a Lex node in your flow. Invokes an Amazon Lex bot to identify the intent of the input and return the intent as the output.", + "Output": "Contains configurations for an output flow node in your flow. The last node in the flow. `outputs` can't be specified for this node.", + "Prompt": "Contains configurations for a prompt node in your flow. Runs a prompt and generates the model response as the output. You can use a prompt from Prompt management or you can configure one in this node.", + "Retrieval": "Contains configurations for a Retrieval node in your flow. Retrieves data from an Amazon S3 location and returns it as the output.", + "Storage": "Contains configurations for a Storage node in your flow. Stores an input in an Amazon S3 location." + }, + "AWS::Bedrock::FlowVersion FlowNodeInput": { + "Expression": "An expression that formats the input for the node. For an explanation of how to create expressions, see [Expressions in Prompt flows in Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/flows-expressions.html) .", + "Name": "A name for the input that you can reference.", + "Type": "The data type of the input. If the input doesn't match this type at runtime, a validation error will be thrown." + }, + "AWS::Bedrock::FlowVersion FlowNodeOutput": { + "Name": "A name for the output that you can reference.", + "Type": "The data type of the output. If the output doesn't match this type at runtime, a validation error will be thrown." 
+ }, + "AWS::Bedrock::FlowVersion KnowledgeBaseFlowNodeConfiguration": { + "KnowledgeBaseId": "The unique identifier of the knowledge base to query.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to use to generate a response from the query results. Omit this field if you want to return the retrieved results as an array." + }, + "AWS::Bedrock::FlowVersion LambdaFunctionFlowNodeConfiguration": { + "LambdaArn": "The Amazon Resource Name (ARN) of the Lambda function to invoke." + }, + "AWS::Bedrock::FlowVersion LexFlowNodeConfiguration": { + "BotAliasArn": "The Amazon Resource Name (ARN) of the Amazon Lex bot alias to invoke.", + "LocaleId": "The Region to invoke the Amazon Lex bot in." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeConfiguration": { + "SourceConfiguration": "Specifies whether the prompt is from Prompt management or defined inline." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeInlineConfiguration": { + "InferenceConfiguration": "Contains inference configurations for the prompt.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to run inference with.", + "TemplateConfiguration": "Contains a prompt and variables in the prompt that can be replaced with values at runtime.", + "TemplateType": "The type of prompt template." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeResourceConfiguration": { + "PromptArn": "The Amazon Resource Name (ARN) of the prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptFlowNodeSourceConfiguration": { + "Inline": "Contains configurations for a prompt that is defined inline", + "Resource": "Contains configurations for a prompt from Prompt management." + }, + "AWS::Bedrock::FlowVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::FlowVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::FlowVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::FlowVersion PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::FlowVersion RetrievalFlowNodeConfiguration": { + "ServiceConfiguration": "Contains configurations for the service to use for retrieving data to return as the output from the node." + }, + "AWS::Bedrock::FlowVersion RetrievalFlowNodeS3Configuration": { + "BucketName": "The name of the Amazon S3 bucket from which to retrieve data." + }, + "AWS::Bedrock::FlowVersion RetrievalFlowNodeServiceConfiguration": { + "S3": "Contains configurations for the Amazon S3 location from which to retrieve data to return as the output from the node." 
+ }, + "AWS::Bedrock::FlowVersion StorageFlowNodeConfiguration": { + "ServiceConfiguration": "Contains configurations for the service to use for storing the input into the node." + }, + "AWS::Bedrock::FlowVersion StorageFlowNodeS3Configuration": { + "BucketName": "The name of the Amazon S3 bucket in which to store the input into the node." + }, + "AWS::Bedrock::FlowVersion StorageFlowNodeServiceConfiguration": { + "S3": "Contains configurations for the Amazon S3 location in which to store the input into the node." + }, + "AWS::Bedrock::FlowVersion TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." }, "AWS::Bedrock::Guardrail": { "BlockedInputMessaging": "The message to return when the guardrail blocks a prompt.", "BlockedOutputsMessaging": "The message to return when the guardrail blocks a model response.", "ContentPolicyConfig": "The content filter policies to configure for the guardrail.", + "ContextualGroundingPolicyConfig": "", "Description": "A description of the guardrail.", "KmsKeyArn": "The ARN of the AWS KMS key that you use to encrypt the guardrail.", "Name": "The name of the guardrail.", @@ -4919,6 +5423,13 @@ "AWS::Bedrock::Guardrail ContentPolicyConfig": { "FiltersConfig": "Contains the type of the content filter and how strongly it should apply to prompts and model responses." }, + "AWS::Bedrock::Guardrail ContextualGroundingFilterConfig": { + "Threshold": "The threshold details for the guardrails contextual grounding filter.", + "Type": "The filter details for the guardrails contextual grounding filter." + }, + "AWS::Bedrock::Guardrail ContextualGroundingPolicyConfig": { + "FiltersConfig": "" + }, "AWS::Bedrock::Guardrail ManagedWordsConfig": { "Type": "The managed word type to configure for the guardrail." }, @@ -4968,9 +5479,29 @@ "StorageConfiguration": "Contains details about the storage configuration of the knowledge base.", "Tags": "Metadata that you can assign to a resource as key-value pairs. For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)" }, + "AWS::Bedrock::KnowledgeBase BedrockEmbeddingModelConfiguration": { + "Dimensions": "The dimensions details for the vector configuration used on the Bedrock embeddings model." + }, + "AWS::Bedrock::KnowledgeBase EmbeddingModelConfiguration": { + "BedrockEmbeddingModelConfiguration": "The vector configuration details on the Bedrock embeddings model." + }, "AWS::Bedrock::KnowledgeBase KnowledgeBaseConfiguration": { "Type": "The type of data that the data source is converted into for the knowledge base.", - "VectorKnowledgeBaseConfiguration": "Contains details about the embeddings model that'sused to convert the data source." + "VectorKnowledgeBaseConfiguration": "Contains details about the model that's used to convert the data source into vector embeddings." 
+ }, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasConfiguration": { + "CollectionName": "The collection name of the knowledge base in MongoDB Atlas.", + "CredentialsSecretArn": "The Amazon Resource Name (ARN) of the secret that you created in AWS Secrets Manager that contains user credentials for your MongoDB Atlas cluster.", + "DatabaseName": "The database name in your MongoDB Atlas cluster for your knowledge base.", + "Endpoint": "The endpoint URL of your MongoDB Atlas cluster for your knowledge base.", + "EndpointServiceName": "The name of the VPC endpoint service in your account that is connected to your MongoDB Atlas cluster.", + "FieldMapping": "Contains the names of the fields to which to map information about the vector store.", + "VectorIndexName": "The name of the MongoDB Atlas vector search index." + }, + "AWS::Bedrock::KnowledgeBase MongoDbAtlasFieldMapping": { + "MetadataField": "The name of the field in which Amazon Bedrock stores metadata about the vector store.", + "TextField": "The name of the field in which Amazon Bedrock stores the raw text from your data. The text is split according to the chunking strategy you choose.", + "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." }, "AWS::Bedrock::KnowledgeBase OpenSearchServerlessConfiguration": { "CollectionArn": "The Amazon Resource Name (ARN) of the OpenSearch Service vector store.", @@ -5006,13 +5537,88 @@ "VectorField": "The name of the field in which Amazon Bedrock stores the vector embeddings for your data sources." }, "AWS::Bedrock::KnowledgeBase StorageConfiguration": { + "MongoDbAtlasConfiguration": "Contains the storage configuration of the knowledge base in MongoDB Atlas.", "OpensearchServerlessConfiguration": "Contains the storage configuration of the knowledge base in Amazon OpenSearch Service.", "PineconeConfiguration": "Contains the storage configuration of the knowledge base in Pinecone.", "RdsConfiguration": "Contains details about the storage configuration of the knowledge base in Amazon RDS. For more information, see [Create a vector index in Amazon RDS](https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base-setup-rds.html) .", "Type": "The vector store service in which the knowledge base is stored." }, "AWS::Bedrock::KnowledgeBase VectorKnowledgeBaseConfiguration": { - "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base." + "EmbeddingModelArn": "The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.", + "EmbeddingModelConfiguration": "The embeddings model configuration details for the vector model used in Knowledge Base." + }, + "AWS::Bedrock::Prompt": { + "CustomerEncryptionKeyArn": "The Amazon Resource Name (ARN) of the KMS key that the prompt is encrypted with.", + "DefaultVariant": "The name of the default variant for the prompt. This value must match the `name` field in the relevant [PromptVariant](https://docs.aws.amazon.com/bedrock/latest/APIReference/API_agent_PromptVariant.html) object.", + "Description": "The description of the prompt.", + "Name": "The name of the prompt.", + "Tags": "Metadata that you can assign to a resource as key-value pairs. 
For more information, see the following resources:\n\n- [Tag naming limits and requirements](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-conventions)\n- [Tagging best practices](https://docs.aws.amazon.com/tag-editor/latest/userguide/tagging.html#tag-best-practices)", + "Variants": "A list of objects, each containing details about a variant of the prompt." + }, + "AWS::Bedrock::Prompt PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::Prompt PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::Prompt PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::Prompt PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt." + }, + "AWS::Bedrock::Prompt PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::Prompt TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt.", + "TextS3Location": "The Amazon S3 location of the prompt text." + }, + "AWS::Bedrock::Prompt TextS3Location": { + "Bucket": "The Amazon S3 bucket containing the prompt text.", + "Key": "The object key for the Amazon S3 location.", + "Version": "The version of the Amazon S3 location to use." + }, + "AWS::Bedrock::PromptVersion": { + "Description": "The description of the prompt version.", + "PromptArn": "The Amazon Resource Name (ARN) of the prompt that the version belongs to.", + "Tags": "" + }, + "AWS::Bedrock::PromptVersion PromptInferenceConfiguration": { + "Text": "Contains inference configurations for a text prompt." + }, + "AWS::Bedrock::PromptVersion PromptInputVariable": { + "Name": "The name of the variable." + }, + "AWS::Bedrock::PromptVersion PromptModelInferenceConfiguration": { + "MaxTokens": "The maximum number of tokens to return in the response.", + "StopSequences": "A list of strings that define sequences after which the model will stop generating.", + "Temperature": "Controls the randomness of the response. Choose a lower value for more predictable outputs and a higher value for more surprising outputs.", + "TopK": "The number of most-likely candidates that the model considers for the next token during generation.", + "TopP": "The percentage of most-likely candidates that the model considers for the next token." + }, + "AWS::Bedrock::PromptVersion PromptTemplateConfiguration": { + "Text": "Contains configurations for the text in a message for a prompt."
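
The prompt entries above compose as follows: a minimal sketch of an AWS::Bedrock::Prompt with one TEXT variant, assuming a hypothetical logical ID, variant name, and placeholder model ID; `DefaultVariant` has to match the `Name` of one entry under `Variants`.

```yaml
# Hypothetical AWS::Bedrock::Prompt; the logical ID, names, and model ID
# are placeholders.
SummarizerPrompt:
  Type: AWS::Bedrock::Prompt
  Properties:
    Name: summarizer
    DefaultVariant: v1                  # must match a Name under Variants
    Variants:
      - Name: v1
        TemplateType: TEXT
        ModelId: anthropic.claude-3-haiku-20240307-v1:0
        TemplateConfiguration:
          Text:
            Text: "Summarize the following text: {{input}}"
            InputVariables:
              - Name: input
        InferenceConfiguration:
          Text:
            MaxTokens: 512
            Temperature: 0.2
```
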
+ }, + "AWS::Bedrock::PromptVersion PromptVariant": { + "InferenceConfiguration": "Contains inference configurations for the prompt variant.", + "ModelId": "The unique identifier of the model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) with which to run inference on the prompt.", + "Name": "The name of the prompt variant.", + "TemplateConfiguration": "Contains configurations for the prompt template.", + "TemplateType": "The type of prompt template to use." + }, + "AWS::Bedrock::PromptVersion TextPromptTemplateConfiguration": { + "InputVariables": "An array of the variables in the prompt template.", + "Text": "The message for the prompt." }, "AWS::BillingConductor::BillingGroup": { "AccountGrouping": "The set of accounts that will be under the billing group. The set of accounts resemble the linked accounts in a consolidated billing family.", @@ -5364,7 +5970,7 @@ "LoggingLevel": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\nLogging levels include `ERROR` , `INFO` , or `NONE` .", "SnsTopicArns": "The ARNs of the SNS topics that deliver notifications to AWS Chatbot .", "Tags": "The tags to add to the configuration.", - "TeamId": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", + "TeamId": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-3 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", "TeamsChannelId": "The ID of the Microsoft Teams channel.\n\nTo get the channel ID, open Microsoft Teams, right click on the channel name in the left pane, then choose Copy. An example of the channel ID syntax is: `19%3ab6ef35dc342d56ba5654e6fc6d25a071%40thread.tacv2` .", "TeamsTenantId": "The ID of the Microsoft Teams tenant.\n\nTo get the tenant ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the tenant ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", "UserRoleRequired": "Enables use of a user role requirement in your chat configuration." @@ -5378,8 +5984,8 @@ "GuardrailPolicies": "The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.", "IamRoleArn": "The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\nThis is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see [IAM Policies for AWS Chatbot](https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html) .", "LoggingLevel": "Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\nLogging levels include `ERROR` , `INFO` , or `NONE` .", - "SlackChannelId": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", - "SlackWorkspaceId": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in [Setting Up AWS Chatbot with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro) in the *AWS Chatbot User Guide* .", + "SlackChannelId": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. For example, `ABCBBLZZZ` .", + "SlackWorkspaceId": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-3 in [Tutorial: Get started with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/slack-setup.html) in the *AWS Chatbot User Guide* .", "SnsTopicArns": "The ARNs of the SNS topics that deliver notifications to AWS Chatbot .", "Tags": "The tags to add to the configuration.", "UserRoleRequired": "Enables use of a user role requirement in your chat configuration." @@ -5448,7 +6054,7 @@ "AWS::CleanRooms::ConfiguredTable": { "AllowedColumns": "The columns within the underlying AWS Glue table that can be utilized within collaborations.", "AnalysisMethod": "The analysis method for the configured table. The only valid value is currently `DIRECT_QUERY`.", - "AnalysisRules": "The entire created analysis rule.", + "AnalysisRules": "The analysis rule that was created for the configured table.", "Description": "A description for the configured table.", "Name": "A name for the configured table.", "TableReference": "The AWS Glue table that this configured table represents.", @@ -5468,6 +6074,7 @@ "Type": "The type of analysis rule." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleAggregation": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.\n\nThe `additionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", "AggregateColumns": "The columns that query runners are allowed to use in aggregation queries.", "AllowedJoinOperators": "Which logical operators (if any) are to be used in an INNER JOIN match condition. Default is `AND` .", "DimensionColumns": "The columns that query runners are allowed to select, group by, or filter by.", @@ -5477,11 +6084,14 @@ "ScalarFunctions": "Set of scalar functions that are allowed to be used on dimension columns and the output of aggregation of metrics." 
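
Putting the aggregation rule fields together: a minimal sketch of the `AnalysisRules` property on an AWS::CleanRooms::ConfiguredTable, with placeholder column names and threshold; the rule body is wrapped in a `Policy` with a `V1` version key.

```yaml
# Hypothetical AnalysisRules for an AWS::CleanRooms::ConfiguredTable;
# column names and the output constraint are placeholders.
AnalysisRules:
  - Type: AGGREGATION
    Policy:
      V1:
        Aggregation:
          AggregateColumns:
            - ColumnNames:
                - purchase_amount
              Function: SUM
          JoinColumns:
            - user_id
          AllowedJoinOperators:
            - AND
          DimensionColumns:
            - region
          ScalarFunctions:
            - TRUNC
          OutputConstraints:
            - ColumnName: user_id
              Minimum: 100              # at least 100 distinct users per output row
              Type: COUNT_DISTINCT
```
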
}, "AWS::CleanRooms::ConfiguredTable AnalysisRuleCustom": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedAnalyses": "The ARN of the analysis templates that are allowed by the custom analysis rule.", "AllowedAnalysisProviders": "The IDs of the AWS accounts that are allowed to query by the custom analysis rule. Required when `allowedAnalyses` is `ANY_QUERY` .", - "DifferentialPrivacy": "The differential privacy configuration." + "DifferentialPrivacy": "The differential privacy configuration.", + "DisallowedOutputColumns": "A list of columns that aren't allowed to be shown in the query output." }, "AWS::CleanRooms::ConfiguredTable AnalysisRuleList": { + "AdditionalAnalyses": "An indicator as to whether additional analyses (such as AWS Clean Rooms ML) can be applied to the output of the direct query.", "AllowedJoinOperators": "The logical operators (if any) that are to be used in an INNER JOIN match condition. Default is `AND` .", "JoinColumns": "Columns that can be used to join a configured table with the table of the member who can query and other members' configured tables.", "ListColumns": "Columns that can be listed in the output." @@ -5512,6 +6122,7 @@ "Value": "The value of the tag." }, "AWS::CleanRooms::ConfiguredTableAssociation": { + "ConfiguredTableAssociationAnalysisRules": "An analysis rule for a configured table association. This analysis rule specifies how data from the table can be used within its associated collaboration. In the console, the `ConfiguredTableAssociationAnalysisRule` is referred to as the *collaboration analysis rule* .", "ConfiguredTableIdentifier": "A unique identifier for the configured table to be associated to. Currently accepts a configured table ID.", "Description": "A description of the configured table association.", "MembershipIdentifier": "The unique ID for the membership this configured table association belongs to.", @@ -5519,10 +6130,80 @@ "RoleArn": "The service will assume this role to access catalog metadata and query the table.", "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRule": { + "Policy": "The policy of the configured table association analysis rule.", + "Type": "The type of the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleAggregation": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.\n\nThe `allowedAdditionalAnalyses` parameter is currently supported for the list analysis rule ( `AnalysisRuleList` ) and the custom analysis rule ( `AnalysisRuleCustom` ).", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." 
+ }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleCustom": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRuleList": { + "AllowedAdditionalAnalyses": "The list of resources or wildcards (ARNs) that are allowed to perform additional analysis on query output.", + "AllowedResultReceivers": "The list of collaboration members who are allowed to receive results of queries run with this configured table." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicy": { + "V1": "The policy for the configured table association analysis rule." + }, + "AWS::CleanRooms::ConfiguredTableAssociation ConfiguredTableAssociationAnalysisRulePolicyV1": { + "Aggregation": "Analysis rule type that enables only aggregation queries on a configured table.", + "Custom": "Analysis rule type that enables the table owner to approve custom SQL queries on their configured tables. It supports differential privacy.", + "List": "Analysis rule type that enables only list queries on a configured table." + }, "AWS::CleanRooms::ConfiguredTableAssociation Tag": { "Key": "The key of the tag.", "Value": "The value of the tag." }, + "AWS::CleanRooms::IdMappingTable": { + "Description": "The description of the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID mapping table.", + "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS KMS key.", + "MembershipIdentifier": "The unique identifier of the membership resource for the ID mapping table.", + "Name": "The name of the ID mapping table.", + "Tags": "An optional label that you can assign to a resource when you create it. Each tag consists of a key and an optional value, both of which you define. When you use tagging, you can also use tag-based access control in IAM policies to control access to this resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the referenced resource in AWS Entity Resolution . Valid values are ID mapping workflow ARNs.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID mapping table resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID mapping table resource." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputReferenceProperties": { + "IdMappingTableInputSource": "The input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable IdMappingTableInputSource": { + "IdNamespaceAssociationId": "The unique identifier of the ID namespace association.", + "Type": "The type of the input source of the ID mapping table." + }, + "AWS::CleanRooms::IdMappingTable Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." 
+ }, + "AWS::CleanRooms::IdNamespaceAssociation": { + "Description": "The description of the ID namespace association.", + "IdMappingConfig": "The configuration settings for the ID mapping table.", + "InputReferenceConfig": "The input reference configuration for the ID namespace association.", + "MembershipIdentifier": "The unique identifier of the membership that contains the ID namespace association.", + "Name": "The name of this ID namespace association.", + "Tags": "" + }, + "AWS::CleanRooms::IdNamespaceAssociation IdMappingConfig": { + "AllowUseAsDimensionColumn": "An indicator as to whether you can use your column as a dimension column in the ID mapping table ( `TRUE` ) or not ( `FALSE` ).\n\nDefault is `FALSE` ." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceConfig": { + "InputReferenceArn": "The Amazon Resource Name (ARN) of the AWS Entity Resolution resource that is being associated to the collaboration. Valid resource ARNs are from the ID namespaces that you own.", + "ManageResourcePolicies": "When `TRUE` , AWS Clean Rooms manages permissions for the ID namespace association resource.\n\nWhen `FALSE` , the resource owner manages permissions for the ID namespace association resource." + }, + "AWS::CleanRooms::IdNamespaceAssociation IdNamespaceAssociationInputReferenceProperties": { + "IdMappingWorkflowsSupported": "Defines how ID mapping workflows are supported for this ID namespace association.", + "IdNamespaceType": "The ID namespace type for this ID namespace association." + }, + "AWS::CleanRooms::IdNamespaceAssociation Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag." + }, "AWS::CleanRooms::Membership": { "CollaborationIdentifier": "The unique ID for the associated collaboration.", "DefaultResultConfiguration": "The default protected query result configuration as specified by the member who can receive results.", @@ -5534,7 +6215,7 @@ "QueryCompute": "The payment responsibilities accepted by the collaboration member for query compute costs." }, "AWS::CleanRooms::Membership MembershipProtectedQueryOutputConfiguration": { - "S3": "Required configuration for a protected query with an `S3` output type." + "S3": "Required configuration for a protected query with an `s3` output type." }, "AWS::CleanRooms::Membership MembershipProtectedQueryResultConfiguration": { "OutputConfiguration": "Configuration for protected query results.", @@ -5555,7 +6236,7 @@ "AWS::CleanRooms::PrivacyBudgetTemplate": { "AutoRefresh": "How often the privacy budget refreshes.\n\n> If you plan to regularly bring new data into the collaboration, use `CALENDAR_MONTH` to automatically get a new privacy budget for the collaboration every calendar month. Choosing this option allows arbitrary amounts of information to be revealed about rows of the data when repeatedly queried across refreshes. Avoid choosing this if the same rows will be repeatedly queried between privacy budget refreshes.", "MembershipIdentifier": "The identifier for a membership resource.", - "Parameters": "Specifies the epislon and noise parameters for the privacy budget template.", + "Parameters": "Specifies the epsilon and noise parameters for the privacy budget template.", "PrivacyBudgetType": "Specifies the type of the privacy budget template.", "Tags": "" }, @@ -5628,10 +6309,10 @@ "VersionId": "The version ID of the type specified.\n\nYou must specify either `TypeVersionArn` , or `TypeName` and `VersionId` ." 
}, "AWS::CloudFormation::HookTypeConfig": { - "Configuration": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "Configuration": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "ConfigurationAlias": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nDefaults to `default` alias. Hook types currently support default configuration alias.", - "TypeArn": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", - "TypeName": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` ." + "TypeArn": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", + "TypeName": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` ." }, "AWS::CloudFormation::HookVersion": { "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the hook permission.", @@ -5662,13 +6343,13 @@ "AWS::CloudFormation::PublicTypeVersion": { "Arn": "The Amazon Resource Number (ARN) of the extension.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` .", "LogDeliveryBucket": "The S3 bucket to which CloudFormation delivers the contract test execution logs.\n\nCloudFormation delivers the logs by the time contract testing has completed and the extension has been assigned a test type status of `PASSED` or `FAILED` .\n\nThe user initiating the stack operation must be able to access items in the specified S3 bucket. Specifically, the user needs the following permissions:\n\n- GetObject\n- PutObject\n\nFor more information, see [Actions, Resources, and Condition Keys for Amazon S3](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazons3.html) in the *AWS Identity and Access Management User Guide* .", - "PublicVersionNumber": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. 
AWS CloudFormation automatically sets the first version number to be `1.0.0` .", + "PublicVersionNumber": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. CloudFormation automatically sets the first version number to be `1.0.0` .", "Type": "The type of the extension to test.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` .", "TypeName": "The name of the extension to test.\n\nConditional: You must specify `Arn` , or `TypeName` and `Type` ." }, "AWS::CloudFormation::Publisher": { "AcceptTermsAndConditions": "Whether you accept the [Terms and Conditions](https://docs.aws.amazon.com/https://cloudformation-registry-documents.s3.amazonaws.com/Terms_and_Conditions_for_AWS_CloudFormation_Registry_Publishers.pdf) for publishing extensions in the CloudFormation registry. You must accept the terms and conditions in order to register to publish public extensions to the CloudFormation registry.\n\nThe default is `false` .", - "ConnectionArn": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *CloudFormation CLI User Guide* ." + "ConnectionArn": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Prerequisite: Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* ." }, "AWS::CloudFormation::ResourceDefaultVersion": { "TypeName": "The name of the resource.\n\nConditional: You must specify either `TypeVersionArn` , or `TypeName` and `VersionId` .", @@ -5693,19 +6374,19 @@ "DisableRollback": "Set to `true` to disable rollback of the stack if stack creation failed. You can specify either `DisableRollback` or `OnFailure` , but not both.\n\nDefault: `false`", "EnableTerminationProtection": "Whether to enable termination protection on the specified stack. If a user attempts to delete a stack with termination protection enabled, the operation fails and the stack remains unchanged. For more information, see [Protecting a Stack From Being Deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-protect-stacks.html) in the *AWS CloudFormation User Guide* . Termination protection is deactivated on stacks by default.\n\nFor [nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) , termination protection is set on the root stack and can't be changed directly on the nested stack.", "LastUpdateTime": "The time the stack was last updated. 
This field will only be returned if the stack has been updated at least once.", - "NotificationARNs": "The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", + "NotificationARNs": "The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", "Outputs": "A list of output structures.", "Parameters": "The set value pairs that represent the parameters passed to CloudFormation when this nested stack is created. Each parameter has a name corresponding to a parameter defined in the embedded template and a value representing the value that you want to set for the parameter.\n\n> If you use the `Ref` function to pass a parameter value to a nested stack, comma-delimited list parameters must be of type `String` . In other words, you can't pass values that are of type `CommaDelimitedList` to nested stacks. \n\nConditional. Required if the nested stack requires input parameters.\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", - "ParentId": "For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.\n\nFor more information, see [Working with Nested Stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", + "ParentId": "For nested stacks--stacks created as resources for another stack--the stack ID of the direct parent of this stack. For the first level of nested stacks, the root stack is also the parent stack.\n\nFor more information, see [Embed stacks within other stacks using nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", "RoleARN": "The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) role that AWS CloudFormation assumes to create the stack. AWS CloudFormation uses the role's credentials to make calls on your behalf. AWS CloudFormation always uses this role for all future operations on the stack. Provided that users have permission to operate on the stack, AWS CloudFormation uses this role even if the users don't have permission to pass it. Ensure that the role grants least privilege.\n\nIf you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. 
If no role is available, AWS CloudFormation uses a temporary session that's generated from your user credentials.", - "RootId": "For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.\n\nFor more information, see [Working with Nested Stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", + "RootId": "For nested stacks--stacks created as resources for another stack--the stack ID of the top-level stack to which the nested stack ultimately belongs.\n\nFor more information, see [Embed stacks within other stacks using nested stacks](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-nested-stacks.html) in the *AWS CloudFormation User Guide* .", "StackId": "Unique identifier of the stack.", "StackName": "The name that's associated with the stack. The name must be unique in the Region in which you are creating the stack.\n\n> A stack name can contain only alphanumeric characters (case sensitive) and hyphens. It must start with an alphabetical character and can't be longer than 128 characters.", "StackPolicyBody": "Structure containing the stack policy body. For more information, go to [Prevent Updates to Stack Resources](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html) in the *AWS CloudFormation User Guide* . You can specify either the `StackPolicyBody` or the `StackPolicyURL` parameter, but not both.", "StackPolicyURL": "Location of a file containing the stack policy. The URL must point to a policy (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. You can specify either the `StackPolicyBody` or the `StackPolicyURL` parameter, but not both.", "StackStatus": "Current status of the stack.", "StackStatusReason": "Success/failure message associated with the stack status.", - "Tags": "Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", + "Tags": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", "TemplateBody": "Structure containing the template body with a minimum length of 1 byte and a maximum length of 51,200 bytes. For more information, go to [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify either the `TemplateBody` or the `TemplateURL` parameter, but not both.", "TemplateURL": "Location of file containing the template body. The URL must point to a template (max size: 460,800 bytes) that's located in an Amazon S3 bucket. For more information, see [Template anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) .\n\nWhether an update causes interruptions depends on the resources that are being updated. An update never causes a nested stack to be replaced.", "TimeoutInMinutes": "The length of time, in minutes, that CloudFormation waits for the nested stack to reach the `CREATE_COMPLETE` state. The default is no timeout. 
When CloudFormation detects that the nested stack has reached the `CREATE_COMPLETE` state, it marks the nested stack resource as `CREATE_COMPLETE` in the parent stack and resumes creating the parent stack. If the timeout period expires before the nested stack reaches `CREATE_COMPLETE` , CloudFormation marks the nested stack as failed and rolls back both the nested stack and parent stack.\n\nUpdates aren't supported." @@ -5717,7 +6398,7 @@ "OutputValue": "The value associated with the output." }, "AWS::CloudFormation::Stack Tag": { - "Key": "*Required* . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services ( AWS ) have the reserved prefix: `aws:` .", + "Key": "*Required* . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by AWS have the reserved prefix: `aws:` .", "Value": "*Required* . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value." }, "AWS::CloudFormation::StackSet": { @@ -5733,7 +6414,7 @@ "PermissionModel": "Describes how the IAM roles required for stack set operations are created.\n\n- With `SELF_MANAGED` permissions, you must create the administrator and execution roles required to deploy to target accounts. For more information, see [Grant Self-Managed Stack Set Permissions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-self-managed.html) .\n- With `SERVICE_MANAGED` permissions, StackSets automatically creates the IAM roles required to deploy to accounts managed by AWS Organizations .", "StackInstancesGroup": "A group of stack instances with parameters in some specific accounts and Regions.", "StackSetName": "The name to associate with the stack set. The name must be unique in the Region where you create your stack set.\n\n> The `StackSetName` property is required.", - "Tags": "Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.", + "Tags": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.", "TemplateBody": "The structure that contains the template body, with a minimum length of 1 byte and a maximum length of 51,200 bytes.\n\nYou must include either `TemplateURL` or `TemplateBody` in a StackSet, but you can't use both. Dynamic references in the `TemplateBody` may not work correctly in all cases. It's recommended to pass templates containing dynamic references through `TemplateUrl` instead.", "TemplateURL": "Location of file containing the template body. The URL must point to a template that's located in an Amazon S3 bucket or a Systems Manager document. For more information, go to [Template Anatomy](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-anatomy.html) in the AWS CloudFormation User Guide.\n\nConditional: You must specify only one of the following parameters: `TemplateBody` , `TemplateURL` ." 
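
For the StackSet properties above, a minimal sketch of a service-managed stack set, assuming a placeholder OU ID and template URL; with `SERVICE_MANAGED` permissions, StackSets creates the IAM roles required to deploy to accounts managed by AWS Organizations.

```yaml
# Hypothetical service-managed AWS::CloudFormation::StackSet; the OU ID
# and template URL are placeholders.
BaselineStackSet:
  Type: AWS::CloudFormation::StackSet
  Properties:
    StackSetName: baseline-roles
    PermissionModel: SERVICE_MANAGED    # StackSets creates the needed IAM roles
    AutoDeployment:
      Enabled: true
      RetainStacksOnAccountRemoval: false
    StackInstancesGroup:
      - DeploymentTargets:
          OrganizationalUnitIds:
            - ou-abcd-11111111
        Regions:
          - us-east-1
    TemplateURL: https://example-bucket.s3.amazonaws.com/baseline.yaml
```
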
}, @@ -5742,7 +6423,7 @@ "RetainStacksOnAccountRemoval": "If set to `true` , stack resources are retained when an account is removed from a target organization or OU. If set to `false` , stack resources are deleted. Specify only if `Enabled` is set to `True` ." }, "AWS::CloudFormation::StackSet DeploymentTargets": { - "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "AccountFilterType": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. `UNION` is not supported for create operations when using StackSet as a resource.", "Accounts": "The names of one or more AWS accounts for which you want to deploy stack set updates.\n\n*Pattern* : `^[0-9]{12}$`", "AccountsUrl": "Returns the value of the `AccountsUrl` property.", "OrganizationalUnitIds": "The organization root ID or organizational unit (OU) IDs to which StackSets deploys.\n\n*Pattern* : `^(ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}|r-[a-z0-9]{4,32})$`" @@ -5769,7 +6450,7 @@ "Regions": "The names of one or more Regions where you want to create stack instances using the specified AWS accounts ." }, "AWS::CloudFormation::StackSet Tag": { - "Key": "*Required* . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by Amazon Web Services ( AWS ) have the reserved prefix: `aws:` .", + "Key": "*Required* . A string used to identify this tag. You can specify a maximum of 128 characters for a tag key. Tags owned by AWS have the reserved prefix: `aws:` .", "Value": "*Required* . A string containing the value for this tag. You can specify a maximum of 256 characters for a tag value." }, "AWS::CloudFormation::TypeActivation": { @@ -5850,16 +6531,16 @@ "Value": "The request header value." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleHeaderPolicyConfig": { - "Header": "", - "Value": "" + "Header": "The name of the HTTP header that CloudFront sets for the single header policy.", + "Value": "Specifies the value to assign to the header for a single header policy." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightConfig": { "SessionStickinessConfig": "Session stickiness provides the ability to define multiple requests from a single viewer as a single session.
This prevents the potentially inconsistent experience of sending some of a given user's requests to your staging distribution, while others are sent to your primary distribution. Define the session duration using TTL values.", "Weight": "The percentage of traffic to send to a staging distribution, expressed as a decimal number between 0 and 0.15. For example, a value of 0.10 means 10% of traffic is sent to the staging distribution." }, "AWS::CloudFront::ContinuousDeploymentPolicy SingleWeightPolicyConfig": { - "SessionStickinessConfig": "", - "Weight": "" + "SessionStickinessConfig": "Enable session stickiness for the associated origin or cache settings.", + "Weight": "The percentage of requests that CloudFront sends to an associated origin or cache settings." }, "AWS::CloudFront::ContinuousDeploymentPolicy TrafficConfig": { "SingleHeaderConfig": "Determines which HTTP requests are sent to the staging distribution.", @@ -5933,12 +6614,12 @@ }, "AWS::CloudFront::Distribution DistributionConfig": { "Aliases": "A complex type that contains information about CNAMEs (alternate domain names), if any, for this distribution.", - "CNAMEs": "", + "CNAMEs": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "CacheBehaviors": "A complex type that contains zero or more `CacheBehavior` elements.", "Comment": "A comment to describe the distribution. The comment cannot be longer than 128 characters.", "ContinuousDeploymentPolicyId": "The identifier of a continuous deployment policy. For more information, see `CreateContinuousDeploymentPolicy` .", "CustomErrorResponses": "A complex type that controls the following:\n\n- Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom error messages before returning the response to the viewer.\n- How long CloudFront caches HTTP status codes in the 4xx and 5xx range.\n\nFor more information about custom error pages, see [Customizing Error Responses](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) in the *Amazon CloudFront Developer Guide* .", - "CustomOrigin": "", + "CustomOrigin": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "DefaultCacheBehavior": "A complex type that describes the default cache behavior if you don't specify a `CacheBehavior` element or if files don't match any of the values of `PathPattern` in `CacheBehavior` elements. You must create exactly one default cache behavior.", "DefaultRootObject": "The object that you want CloudFront to request from your origin (for example, `index.html` ) when a viewer requests the root URL for your distribution ( `https://www.example.com` ) instead of an object in your distribution ( `https://www.example.com/product-description.html` ). Specifying a default root object avoids exposing the contents of your distribution.\n\nSpecify only the object name, for example, `index.html` .
Don't add a `/` before the object name.\n\nIf you don't want to specify a default root object when you create a distribution, include an empty `DefaultRootObject` element.\n\nTo delete the default root object from an existing distribution, update the distribution configuration and include an empty `DefaultRootObject` element.\n\nTo replace the default root object, update the distribution configuration and specify the new object.\n\nFor more information about the default root object, see [Creating a Default Root Object](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) in the *Amazon CloudFront Developer Guide* .", "Enabled": "From this field, you can enable or disable the selected distribution.", @@ -5949,7 +6630,7 @@ "Origins": "A complex type that contains information about origins for this distribution.\n\nSpecify a value for either the `Origins` or `OriginGroups` property.", "PriceClass": "The price class that corresponds with the maximum price that you want to pay for CloudFront service. If you specify `PriceClass_All` , CloudFront responds to requests for your objects from all CloudFront edge locations.\n\nIf you specify a price class other than `PriceClass_All` , CloudFront serves your objects from the CloudFront edge location that has the lowest latency among the edge locations in your price class. Viewers who are in or near regions that are excluded from your specified price class may encounter slower performance.\n\nFor more information about price classes, see [Choosing the Price Class for a CloudFront Distribution](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) in the *Amazon CloudFront Developer Guide* . For information about CloudFront pricing, including how price classes (such as Price Class 100) map to CloudFront regions, see [Amazon CloudFront Pricing](https://docs.aws.amazon.com/cloudfront/pricing/) .", "Restrictions": "A complex type that identifies ways in which you want to restrict distribution of your content.", - "S3Origin": "", + "S3Origin": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "Staging": "A Boolean that indicates whether this is a staging distribution. When this value is `true` , this is a staging distribution. When this value is `false` , this is not a staging distribution.", "ViewerCertificate": "A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.", "WebACLId": "A unique identifier that specifies the AWS WAF web ACL, if any, to associate with this distribution. To specify a web ACL created using the latest version of AWS WAF , use the ACL ARN, for example `arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` . To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example `a1b2c3d4-5678-90ab-cdef-EXAMPLE11111` .\n\nAWS WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests that are forwarded to CloudFront, and lets you control access to your content. Based on conditions that you specify, such as the IP addresses that requests originate from or the values of query strings, CloudFront responds to requests either with the requested content or with an HTTP 403 status code (Forbidden). 
You can also configure CloudFront to return a custom error page when a request is blocked. For more information about AWS WAF , see the [AWS WAF Developer Guide](https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html) ." @@ -5974,15 +6655,15 @@ "LambdaFunctionARN": "The ARN of the Lambda@Edge function. You must specify the ARN of a function version; you can't specify an alias or $LATEST." }, "AWS::CloudFront::Distribution LegacyCustomOrigin": { - "DNSName": "", - "HTTPPort": "", - "HTTPSPort": "", - "OriginProtocolPolicy": "", - "OriginSSLProtocols": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "HTTPPort": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", + "HTTPSPort": "The HTTPS port that CloudFront uses to connect to the origin. Specify the HTTPS port that the origin listens on.", + "OriginProtocolPolicy": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", + "OriginSSLProtocols": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution LegacyS3Origin": { - "DNSName": "", - "OriginAccessIdentity": "" + "DNSName": "The domain name assigned to your CloudFront distribution.", + "OriginAccessIdentity": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead." }, "AWS::CloudFront::Distribution Logging": { "Bucket": "The Amazon S3 bucket to store the access logs in, for example, `myawslogbucket.s3.amazonaws.com` .", @@ -6302,7 +6983,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` .
If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per 
selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -6320,7 +7001,7 @@ "ResourcePolicy": "A JSON-formatted string for an AWS resource-based policy.\n\nThe following are requirements for the resource policy:\n\n- Contains only one action: cloudtrail-data:PutAuditEvents\n- Contains at least one statement. The policy can have a maximum of 20 statements.\n- Each statement contains at least one principal. A statement can have a maximum of 50 principals." }, "AWS::CloudTrail::Trail": { - "AdvancedEventSelectors": "Specifies the settings for advanced event selectors. 
You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", + "AdvancedEventSelectors": "Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events.\n\nYou can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) and [Logging network activity events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-network-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", "CloudWatchLogsLogGroupArn": "Specifies a log group name using an Amazon Resource Name (ARN), a unique identifier that represents the log group to which CloudTrail logs are delivered. You must use a log group that exists in your account.\n\nTo enable CloudWatch Logs delivery, you must provide values for `CloudWatchLogsLogGroupArn` and `CloudWatchLogsRoleArn` .\n\n> If you previously enabled CloudWatch Logs delivery and want to disable CloudWatch Logs delivery, you must set the values of the `CloudWatchLogsRoleArn` and `CloudWatchLogsLogGroupArn` fields to `\"\"` .", "CloudWatchLogsRoleArn": "Specifies the role for the CloudWatch Logs endpoint to assume to write to a user's log group. You must use a role that exists in your account.\n\nTo enable CloudWatch Logs delivery, you must provide values for `CloudWatchLogsLogGroupArn` and `CloudWatchLogsRoleArn` .\n\n> If you previously enabled CloudWatch Logs delivery and want to disable CloudWatch Logs delivery, you must set the values of the `CloudWatchLogsRoleArn` and `CloudWatchLogsLogGroupArn` fields to `\"\"` .", "EnableLogFileValidation": "Specifies whether log file validation is enabled. The default is false.\n\n> When you disable log file integrity validation, the chain of digest files is broken after one hour. CloudTrail does not create digest files for log files that were delivered during a period in which log file integrity validation was disabled. For example, if you enable log file integrity validation at noon on January 1, disable it at noon on January 2, and re-enable it at noon on January 10, digest files will not be created for the log files delivered from noon on January 2 to noon on January 10. 
The same applies whenever you stop CloudTrail logging or delete a trail.", @@ -6344,15 +7025,15 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", "StartsWith": "An operator that includes events that match the first few characters of the event record field specified as the value of `Field` ." }, "AWS::CloudTrail::Trail DataResource": { - "Type": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information about these additional resource types, see [AdvancedFieldSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) .", - "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . 
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." + "Type": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information, see [AdvancedEventSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) .", + "Values": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` ." }, "AWS::CloudTrail::Trail EventSelector": { "DataResources": "CloudTrail supports data event logging for Amazon S3 objects in standard S3 buckets, AWS Lambda functions, and Amazon DynamoDB tables with basic event selectors. You can specify up to 250 resources for an individual event selector, but the total number of data resources cannot exceed 250 across all event selectors in a trail. 
This limit does not apply if you configure resource logging for all data events.\n\nFor more information, see [Data Events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) and [Limits in AWS CloudTrail](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/WhatIsCloudTrail-Limits.html) in the *AWS CloudTrail User Guide* .\n\n> To log data events for all other resource types including objects stored in [directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) , you must use [AdvancedEventSelectors](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) . You must also use `AdvancedEventSelectors` if you want to filter on the `eventName` field.", @@ -6579,10 +7260,11 @@ }, "AWS::CodeBuild::Fleet": { "BaseCapacity": "The initial number of machines allocated to the compute \ufb02eet, which de\ufb01nes the number of builds that can run in parallel.", - "ComputeType": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", - "EnvironmentType": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "ComputeType": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "EnvironmentType": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "FleetServiceRole": "The service role associated with the compute fleet. For more information, see [Allow a user to add a permission policy for a fleet service role](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-permission-policy-fleet-service-role.html) in the *AWS CodeBuild User Guide* .", - "FleetVpcConfig": "Information about the VPC configuration that AWS CodeBuild accesses.", + "FleetVpcConfig": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the VPC configuration that AWS CodeBuild accesses.", + "ImageId": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe Amazon Machine Image (AMI) of the compute fleet.", "Name": "The name of the compute fleet.", "OverflowBehavior": "The compute fleet overflow behavior.\n\n- For overflow behavior `QUEUE` , your overflow builds need to wait on the existing fleet instance to become available.\n- For overflow behavior `ON_DEMAND` , your overflow builds run on CodeBuild on-demand.\n\n> If you choose to set your overflow behavior to on-demand while creating a VPC-connected fleet, make sure that you add the required VPC permissions to your project service role. For more information, see [Example policy statement to allow CodeBuild access to AWS services required to create a VPC network interface](https://docs.aws.amazon.com/codebuild/latest/userguide/auth-and-access-control-iam-identity-based-access-control.html#customer-managed-policies-example-create-vpc-network-interface) .", "Tags": "A list of tag key and value pairs associated with this compute fleet.\n\nThese tags are available for use by AWS services that support AWS CodeBuild compute fleet tags." @@ -6714,7 +7396,7 @@ "Name": "The name of either the enterprise or organization that will send webhook events to CodeBuild , depending on if the webhook is a global or organization webhook respectively." }, "AWS::CodeBuild::Project Source": { - "Auth": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. Your code should not get or set `Auth` directly.", + "Auth": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "BuildSpec": "The build specification for the project. If this value is not provided, then the source code must contain a buildspec file named `buildspec.yml` at the root level. If this value is provided, it can be either a single string containing the entire build specification, or the path to an alternate buildspec file relative to the value of the built-in environment variable `CODEBUILD_SRC_DIR` . The alternate buildspec file can have a name other than `buildspec.yml` , for example `myspec.yml` or `build_spec_qa.yml` or similar. 
For more information, see the [Build Spec Reference](https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-example) in the *AWS CodeBuild User Guide* .", "BuildStatusConfig": "Contains information that defines how the build project reports the build status to the source provider. This option is only used when the source provider is `GITHUB` , `GITHUB_ENTERPRISE` , or `BITBUCKET` .", "GitCloneDepth": "The depth of history to download. Minimum value is 0. If this value is 0, greater than 25, or not provided, then the full history is downloaded with each build project. If your source type is Amazon S3, this value is not supported.", @@ -6726,8 +7408,8 @@ "Type": "The type of repository that contains the source code to be built. Valid values include:\n\n- `BITBUCKET` : The source code is in a Bitbucket repository.\n- `CODECOMMIT` : The source code is in an CodeCommit repository.\n- `CODEPIPELINE` : The source code settings are specified in the source action of a pipeline in CodePipeline.\n- `GITHUB` : The source code is in a GitHub repository.\n- `GITHUB_ENTERPRISE` : The source code is in a GitHub Enterprise Server repository.\n- `GITLAB` : The source code is in a GitLab repository.\n- `GITLAB_SELF_MANAGED` : The source code is in a self-managed GitLab repository.\n- `NO_SOURCE` : The project does not have input source code.\n- `S3` : The source code is in an Amazon S3 bucket." }, "AWS::CodeBuild::Project SourceAuth": { - "Resource": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", - "Type": "The authorization type to use. The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only." + "Resource": "The resource value that applies to the specified authorization type.", + "Type": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER." }, "AWS::CodeBuild::Project Tag": { "Key": "The tag's key.", @@ -6767,9 +7449,9 @@ "Value": "The tag's value." }, "AWS::CodeBuild::SourceCredential": { - "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "AuthType": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "ServerType": "The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, GITLAB, GITLAB_SELF_MANAGED, or BITBUCKET.", - "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "Token": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "Username": "The Bitbucket username when the `authType` is BASIC_AUTH. This parameter is not valid for other types of source providers or connections." }, "AWS::CodeCommit::Repository": { @@ -7085,15 +7767,23 @@ "ArtifactStore": "Represents information about the S3 bucket where artifacts are stored for the pipeline.\n\n> You must include either `artifactStore` or `artifactStores` in your pipeline, but you cannot use both. 
If you create a cross-region action in your pipeline, you must use `artifactStores` .", "Region": "The action declaration's AWS Region, such as us-east-1." }, + "AWS::CodePipeline::Pipeline BeforeEntryConditions": { + "Conditions": "The conditions that are configured as entry conditions." + }, "AWS::CodePipeline::Pipeline BlockerDeclaration": { "Name": "Reserved for future use.", "Type": "Reserved for future use." }, + "AWS::CodePipeline::Pipeline Condition": { + "Result": "The action to be done when the condition is met. For example, rolling back an execution for a failure condition.", + "Rules": "The rules that make up the condition." + }, "AWS::CodePipeline::Pipeline EncryptionKey": { "Id": "The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.\n\n> Aliases are recognized only in the account that created the AWS KMS key. For cross-account actions, you can only use the key ID or key ARN to identify the key. Cross-account actions involve using the role from the other account (AccountB), so specifying the key ID will use the key from the other account (AccountB).", "Type": "The type of encryption key, such as an AWS KMS key. When creating or updating a pipeline, the value must be set to 'KMS'." }, "AWS::CodePipeline::Pipeline FailureConditions": { + "Conditions": "The conditions that are configured as failure conditions.", "Result": "The specified result for when the failure conditions are met, such as rolling back the stage." }, "AWS::CodePipeline::Pipeline GitBranchFilterCriteria": { @@ -7133,16 +7823,35 @@ "GitConfiguration": "Provides the filter criteria and the source stage for the repository event that starts the pipeline, such as Git tags.", "ProviderType": "The source provider for the event, such as connections configured for a repository with Git tags, for the specified trigger configuration." }, + "AWS::CodePipeline::Pipeline RuleDeclaration": { + "Configuration": "The action configuration fields for the rule.", + "InputArtifacts": "The input artifacts fields for the rule, such as specifying an input file for the rule.", + "Name": "The name of the rule that is created for the condition, such as CheckAllResults.", + "Region": "The Region for the condition associated with the rule.", + "RoleArn": "The pipeline role ARN associated with the rule.", + "RuleTypeId": "The ID for the rule type, which is made up of the combined values for category, owner, provider, and version." + }, + "AWS::CodePipeline::Pipeline RuleTypeId": { + "Category": "A category defines what kind of rule can be run in the stage, and constrains the provider type for the rule. The valid category is `Rule` .", + "Owner": "The creator of the rule being called. The valid value for the `Owner` field in the rule category is `AWS` .", + "Provider": "The rule provider, such as the `DeploymentWindow` rule.", + "Version": "A string that describes the rule version." + }, "AWS::CodePipeline::Pipeline StageDeclaration": { "Actions": "The actions included in a stage.", + "BeforeEntry": "The method to use when a stage allows entry. For example, configuring this field for conditions will allow entry to the stage when the conditions are met.", "Blockers": "Reserved for future use.", "Name": "The name of the stage.", - "OnFailure": "The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage." 
+ "OnFailure": "The method to use when a stage has not completed successfully. For example, configuring this field for rollback will roll back a failed stage automatically to the last successful pipeline execution in the stage.", + "OnSuccess": "The method to use when a stage has succeeded. For example, configuring this field for conditions will allow the stage to succeed when the conditions are met." }, "AWS::CodePipeline::Pipeline StageTransition": { "Reason": "The reason given to the user that a stage is disabled, such as waiting for manual approval or manual tests. This message is displayed in the pipeline console UI.", "StageName": "The name of the stage where you want to disable the inbound or outbound transition of artifacts." }, + "AWS::CodePipeline::Pipeline SuccessConditions": { + "Conditions": "The conditions that are success conditions." + }, "AWS::CodePipeline::Pipeline Tag": { "Key": "The tag's key.", "Value": "The tag's value." @@ -7153,7 +7862,7 @@ "Name": "The name of a pipeline-level variable." }, "AWS::CodePipeline::Webhook": { - "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "Authentication": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "AuthenticationConfiguration": "Properties that configure the authentication applied to incoming webhook trigger requests. The required properties depend on the authentication type. For GITHUB_HMAC, only the `SecretToken` property must be set. 
For IP, only the `AllowedIPRange` property must be set to a valid CIDR range. For UNAUTHENTICATED, no properties can be set.", "Filters": "A list of rules applied to the body/payload sent in the POST request to a webhook URL. All defined rules must pass for the request to be accepted and the pipeline started.", "Name": "The name of the webhook.", @@ -7164,7 +7873,7 @@ }, "AWS::CodePipeline::Webhook WebhookAuthConfiguration": { "AllowedIPRange": "The property used to configure acceptance of webhooks in an IP address range. For IP, only the `AllowedIPRange` property must be set. This property must be set to a valid CIDR range.", - "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities." + "SecretToken": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response." }, "AWS::CodePipeline::Webhook WebhookFilterRule": { "JsonPath": "A JsonPath expression that is applied to the body/payload of the webhook. The value selected by the JsonPath expression must match the value specified in the `MatchEquals` field. Otherwise, the request is ignored. For more information, see [Java JsonPath implementation](https://docs.aws.amazon.com/https://github.com/json-path/JsonPath) in GitHub.", @@ -7243,6 +7952,7 @@ "CognitoStreams": "Configuration options for configuring Amazon Cognito streams.", "DeveloperProviderName": "The \"domain\" Amazon Cognito uses when referencing your users. This name acts as a placeholder that allows your backend and the Amazon Cognito service to communicate about the developer provider. For the `DeveloperProviderName` , you can use letters and periods (.), underscores (_), and dashes (-).\n\n*Minimum length* : 1\n\n*Maximum length* : 100", "IdentityPoolName": "The name of your Amazon Cognito identity pool.\n\n*Minimum length* : 1\n\n*Maximum length* : 128\n\n*Pattern* : `[\\w\\s+=,.@-]+`", + "IdentityPoolTags": "Tags to assign to the identity pool. 
A tag is a label that you can apply to identity pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria.", "OpenIdConnectProviderARNs": "The Amazon Resource Names (ARNs) of the OpenID connect providers.", "PushSync": "The configuration options to be applied to the identity pool.", "SamlProviderARNs": "The Amazon Resource Names (ARNs) of the Security Assertion Markup Language (SAML) providers.", @@ -7262,6 +7972,10 @@ "ApplicationArns": "The ARNs of the Amazon SNS platform applications that could be used by clients.", "RoleArn": "An IAM role configured to allow Amazon Cognito to call Amazon SNS on behalf of the developer." }, + "AWS::Cognito::IdentityPool Tag": { + "Key": "", + "Value": "" + }, "AWS::Cognito::IdentityPoolPrincipalTag": { "IdentityPoolId": "The identity pool that you want to associate with this principal tag map.", "IdentityProviderName": "The identity pool identity provider (IdP) that you want to associate with this principal tag map.", @@ -7289,31 +8003,41 @@ "Rules": "The rules. You can specify up to 25 rules per identity provider." }, "AWS::Cognito::LogDeliveryConfiguration": { - "LogConfigurations": "The detailed activity logging destination of a user pool.", - "UserPoolId": "The ID of the user pool where you configured detailed activity logging." + "LogConfigurations": "A logging destination of a user pool. User pools can have multiple logging destinations for message-delivery and user-activity logs.", + "UserPoolId": "The ID of the user pool where you configured logging." }, "AWS::Cognito::LogDeliveryConfiguration CloudWatchLogsConfiguration": { "LogGroupArn": "The Amazon Resource Name (arn) of a CloudWatch Logs log group where your user pool sends logs. The log group must not be encrypted with AWS Key Management Service and must be in the same AWS account as your user pool.\n\nTo send logs to log groups with a resource policy of a size greater than 5120 characters, configure a log group with a path that starts with `/aws/vendedlogs` . For more information, see [Enabling logging from certain AWS services](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AWS-logs-and-resource-policy.html) ." }, + "AWS::Cognito::LogDeliveryConfiguration FirehoseConfiguration": { + "StreamArn": "The ARN of an Amazon Data Firehose stream that's the destination for advanced security features log export." + }, "AWS::Cognito::LogDeliveryConfiguration LogConfiguration": { - "CloudWatchLogsConfiguration": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", - "EventSource": "The source of events that your user pool sends for detailed activity logging.", - "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging." + "CloudWatchLogsConfiguration": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.\n\nThis data type is a request parameter of [SetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetLogDeliveryConfiguration.html) and a response parameter of [GetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetLogDeliveryConfiguration.html) .", + "EventSource": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . 
To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", + "FirehoseConfiguration": "Configuration for the Amazon Data Firehose stream destination of user activity log export with advanced security features.", + "LogLevel": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", + "S3Configuration": "Configuration for the Amazon S3 bucket destination of user activity log export with advanced security features." + }, + "AWS::Cognito::LogDeliveryConfiguration S3Configuration": { + "BucketArn": "The ARN of an Amazon S3 bucket that's the destination for advanced security features log export." }, "AWS::Cognito::UserPool": { "AccountRecoverySetting": "Use this setting to define which verified available method a user can use to recover their password when they call `ForgotPassword` . It allows you to define a preferred method when a user has more than one method available. With this setting, SMS does not qualify for a valid password recovery mechanism if the user also has SMS MFA enabled. In the absence of this setting, Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email.", - "AdminCreateUserConfig": "The configuration for creating a new user profile.", + "AdminCreateUserConfig": "The settings for administrator creation of users in a user pool. Contains settings for allowing user sign-up, customizing invitation messages to new users, and the amount of time before temporary passwords expire.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", "AutoVerifiedAttributes": "The attributes to be auto-verified. Possible values: *email* , *phone_number* .", "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "DeviceConfiguration": "The device-remembering configuration for a user pool. 
A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.", + "EmailAuthenticationMessage": "", + "EmailAuthenticationSubject": "", "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.", "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", "EmailVerificationSubject": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", "EnabledMfas": "Enables MFA on a specified user pool. To disable all MFAs after it has been enabled, set MfaConfiguration to \u201cOFF\u201d and remove EnabledMfas. MFAs can only be all disabled if MfaConfiguration is OFF. Once SMS_MFA is enabled, SMS_MFA can only be disabled by setting MfaConfiguration to \u201cOFF\u201d. Can be one of the following values:\n\n- `SMS_MFA` - Enables SMS MFA for the user pool. SMS_MFA can only be enabled if SMS configuration is provided.\n- `SOFTWARE_TOKEN_MFA` - Enables software token MFA for the user pool.\n\nAllowed values: `SMS_MFA` | `SOFTWARE_TOKEN_MFA`", - "LambdaConfig": "The Lambda trigger configuration information for the new user pool.\n\n> In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you must make an extra call to add permission for these event sources to invoke your Lambda function.\n> \n> For more information on using the Lambda API to add permission, see [AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html) .\n> \n> For adding permission using the AWS CLI , see [add-permission](https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html) .", + "LambdaConfig": "A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them.", "MfaConfiguration": "The multi-factor authentication (MFA) configuration. Valid values include:\n\n- `OFF` MFA won't be used for any users.\n- `ON` MFA is required for all users to sign in.\n- `OPTIONAL` MFA will be required only for individual users who have an MFA factor activated.", - "Policies": "The policy associated with a user pool.", + "Policies": "A list of user pool policies. Contains the policy that sets password-complexity requirements.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "Schema": "The schema attributes for the new user pool. 
These attributes can be standard or custom attributes.\n\n> During a user pool update, you can add new schema attributes but you cannot modify or delete an existing schema attribute.", "SmsAuthenticationMessage": "A string representing the SMS authentication message.", "SmsConfiguration": "The SMS configuration with the settings that your Amazon Cognito user pool must use to send an SMS message from your AWS account through Amazon Simple Notification Service. To send SMS messages with Amazon SNS in the AWS Region that you want, the Amazon Cognito user pool uses an AWS Identity and Access Management (IAM) role in your AWS account .", @@ -7324,15 +8048,18 @@ "UserPoolTags": "The tag keys and values to assign to the user pool. A tag is a label that you can use to categorize and manage user pools in different ways, such as by purpose, owner, environment, or other criteria.", "UsernameAttributes": "Determines whether email addresses or phone numbers can be specified as user names when a user signs up. Possible values: `phone_number` or `email` .\n\nThis user pool property cannot be updated.", "UsernameConfiguration": "You can choose to set case sensitivity on the username input for the selected sign-in option. For example, when this is set to `False` , users will be able to sign in using either \"username\" or \"Username\". This configuration is immutable once it has been set.", - "VerificationMessageTemplate": "The template for the verification message that the user sees when the app requests permission to access the user's information." + "VerificationMessageTemplate": "The template for the verification message that your user pool delivers to users who set an email address or phone number attribute.\n\nSet the email message type that corresponds to your `DefaultEmailOption` selection. For `CONFIRM_WITH_LINK` , specify an `EmailMessageByLink` and leave `EmailMessage` blank. For `CONFIRM_WITH_CODE` , specify an `EmailMessage` and leave `EmailMessageByLink` blank. When you supply both parameters with either choice, Amazon Cognito returns an error." }, "AWS::Cognito::UserPool AccountRecoverySetting": { "RecoveryMechanisms": "The list of `RecoveryOptionTypes` ." }, "AWS::Cognito::UserPool AdminCreateUserConfig": { - "AllowAdminCreateUserOnly": "Set to `True` if only the administrator is allowed to create user profiles. Set to `False` if users can sign themselves up via an app.", + "AllowAdminCreateUserOnly": "The setting for allowing self-service sign-up. When `true` , only administrators can create new user profiles. When `false` , users can register themselves and create a new user profile with the [SignUp](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) operation.", "InviteMessageTemplate": "The message template to be used for the welcome message to new users.\n\nSee also [Customizing User Invitation Messages](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-message-customizations.html#cognito-user-pool-settings-user-invitation-message-customization) .", - "UnusedAccountValidityDays": "The user account expiration limit, in days, after which a new account that hasn't signed in is no longer usable. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `\"RESEND\"` for the `MessageAction` parameter. 
The default value for this parameter is 7.\n\n> If you set a value for `TemporaryPasswordValidityDays` in `PasswordPolicy` , that value will be used, and `UnusedAccountValidityDays` will be no longer be an available parameter for that user pool." + "UnusedAccountValidityDays": "This parameter is no longer in use. Configure the duration of temporary passwords with the `TemporaryPasswordValidityDays` parameter of [PasswordPolicyType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_PasswordPolicyType.html) . For older user pools that have a `UnusedAccountValidityDays` configuration, that value is effective until you set a value for `TemporaryPasswordValidityDays` .\n\nThe password expiration limit in days for administrator-created users. When this time expires, the user can't sign in with their temporary password. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `RESEND` for the `MessageAction` parameter.\n\nThe default value for this parameter is 7." + }, + "AWS::Cognito::UserPool AdvancedSecurityAdditionalFlows": { + "CustomAuthMode": "" }, "AWS::Cognito::UserPool CustomEmailSender": { "LambdaArn": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon Cognito triggers to send email notifications to users.", @@ -7359,20 +8086,20 @@ "SMSMessage": "The message template for SMS messages." }, "AWS::Cognito::UserPool LambdaConfig": { - "CreateAuthChallenge": "Creates an authentication challenge.", + "CreateAuthChallenge": "The configuration of a create auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "CustomEmailSender": "A custom email sender AWS Lambda trigger.", - "CustomMessage": "A custom Message AWS Lambda trigger.", + "CustomMessage": "A custom message Lambda trigger. This trigger is an opportunity to customize all SMS and email messages from your user pool. When a custom message trigger is active, your user pool routes all messages to a Lambda function that returns a runtime-customized message subject and body for your user pool to deliver to a user.", "CustomSMSSender": "A custom SMS sender AWS Lambda trigger.", - "DefineAuthChallenge": "Defines the authentication challenge.", + "DefineAuthChallenge": "The configuration of a define auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "KMSKeyID": "The Amazon Resource Name of a AWS Key Management Service ( AWS KMS ) key. Amazon Cognito uses the key to encrypt codes and temporary passwords sent to `CustomEmailSender` and `CustomSMSSender` .", - "PostAuthentication": "A post-authentication AWS Lambda trigger.", - "PostConfirmation": "A post-confirmation AWS Lambda trigger.", - "PreAuthentication": "A pre-authentication AWS Lambda trigger.", - "PreSignUp": "A pre-registration AWS Lambda trigger.", - "PreTokenGeneration": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . 
For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .\n\nYou can set ``", - "PreTokenGenerationConfig": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", - "UserMigration": "The user migration Lambda config type.", - "VerifyAuthChallengeResponse": "Verifies the authentication challenge response." + "PostAuthentication": "The configuration of a [post authentication Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-authentication.html) in a user pool. This trigger can take custom actions after a user signs in.", + "PostConfirmation": "The configuration of a [post confirmation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-confirmation.html) in a user pool. This trigger can take custom actions after a user confirms their user account and their email address or phone number.", + "PreAuthentication": "The configuration of a [pre authentication trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-authentication.html) in a user pool. This trigger can evaluate and modify user sign-in events.", + "PreSignUp": "The configuration of a [pre sign-up Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-sign-up.html) in a user pool. This trigger evaluates new users and can bypass confirmation, [link a federated user profile](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-federation-consolidate-users.html) , or block sign-up requests.", + "PreTokenGeneration": "The legacy configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .", + "PreTokenGenerationConfig": "The detailed configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", + "UserMigration": "The configuration of a [migrate user Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-migrate-user.html) in a user pool. This trigger can create user profiles when users sign in or attempt to reset their password with credentials that don't exist yet.", + "VerifyAuthChallengeResponse": "The configuration of a verify auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) ." }, "AWS::Cognito::UserPool NumberAttributeConstraints": { "MaxValue": "The maximum length of a number attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", @@ -7380,14 +8107,15 @@ }, "AWS::Cognito::UserPool PasswordPolicy": { "MinimumLength": "The minimum length of the password in the policy that you have set. 
This value can't be less than 6.", - "RequireLowercase": "In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.", - "RequireNumbers": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.", - "RequireSymbols": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.", - "RequireUppercase": "In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.", + "PasswordHistorySize": "The number of previous passwords that you want Amazon Cognito to restrict each user from reusing. Users can't set a password that matches any of `n` previous passwords, where `n` is the value of `PasswordHistorySize` .\n\nPassword history isn't enforced and isn't displayed in [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) responses when you set this value to `0` or don't provide it. To activate this setting, [advanced security features](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html) must be active in your user pool.", + "RequireLowercase": "The requirement in a password policy that users must include at least one lowercase letter in their password.", + "RequireNumbers": "The requirement in a password policy that users must include at least one number in their password.", + "RequireSymbols": "The requirement in a password policy that users must include at least one symbol in their password.", + "RequireUppercase": "The requirement in a password policy that users must include at least one uppercase letter in their password.", "TemporaryPasswordValidityDays": "The number of days a temporary password is valid in the password policy. If the user doesn't sign in during this time, an administrator must reset their password. Defaults to `7` . If you submit a value of `0` , Amazon Cognito treats it as a null value and sets `TemporaryPasswordValidityDays` to its default value.\n\n> When you set `TemporaryPasswordValidityDays` for a user pool, you can no longer set a value for the legacy `UnusedAccountValidityDays` parameter in that user pool." }, "AWS::Cognito::UserPool Policies": { - "PasswordPolicy": "The password policy." + "PasswordPolicy": "The password policy settings for a user pool, including complexity, history, and length requirements." }, "AWS::Cognito::UserPool PreTokenGenerationConfig": { "LambdaArn": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nThis parameter and the `PreTokenGeneration` property of `LambdaConfig` have the same value. For new instances of pre token generation triggers, set `LambdaArn` .", @@ -7413,19 +8141,20 @@ }, "AWS::Cognito::UserPool StringAttributeConstraints": { "MaxLength": "The maximum length of a string attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", - "MinLength": "The minimum length." + "MinLength": "The minimum length of a string attribute value." }, "AWS::Cognito::UserPool UserAttributeUpdateSettings": { "AttributesRequireVerificationBeforeUpdate": "Requires that your user verifies their email address, phone number, or both before Amazon Cognito updates the value of that attribute. 
When you update a user attribute that has this option activated, Amazon Cognito sends a verification message to the new phone number or email address. Amazon Cognito doesn\u2019t change the value of the attribute until your user responds to the verification message and confirms the new value.\n\nYou can verify an updated email address or phone number with a [VerifyUserAttribute](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerifyUserAttribute.html) API request. You can also call the [AdminUpdateUserAttributes](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html) API and set `email_verified` or `phone_number_verified` to true.\n\nWhen `AttributesRequireVerificationBeforeUpdate` is false, your user pool doesn't require that your users verify attribute changes before Amazon Cognito updates them. In a user pool where `AttributesRequireVerificationBeforeUpdate` is false, API operations that change attribute values can immediately update a user\u2019s `email` or `phone_number` attribute." }, "AWS::Cognito::UserPool UserPoolAddOns": { - "AdvancedSecurityMode": "The operating mode of advanced security features in your user pool." + "AdvancedSecurityAdditionalFlows": "", + "AdvancedSecurityMode": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication." }, "AWS::Cognito::UserPool UsernameConfiguration": { - "CaseSensitive": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **True** - Enables case sensitivity for all username input. When this option is set to `True` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **False** - Enables case insensitivity for all username input. For example, when this option is set to `False` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute." + "CaseSensitive": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **true** - Enables case sensitivity for all username input. When this option is set to `true` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **false** - Enables case insensitivity for all username input. For example, when this option is set to `false` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute." 
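The `UsernameConfiguration` text above is easier to follow as a template fragment. A minimal sketch of a user pool with the recommended case-insensitive sign-in; the resource name `ExampleUserPool` and the pool name are hypothetical:

```yaml
Resources:
  ExampleUserPool:
    Type: AWS::Cognito::UserPool
    Properties:
      UserPoolName: example-pool   # hypothetical name
      UsernameConfiguration:
        CaseSensitive: false       # users may sign in as "username", "USERNAME", or "UserName"
```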
}, "AWS::Cognito::UserPool VerificationMessageTemplate": { - "DefaultEmailOption": "The default email option.", + "DefaultEmailOption": "The configuration of verification emails to contain a clickable link or a verification code.\n\nFor link, your template body must contain link text in the format `{##Click here##}` . \"Click here\" in the example is a customizable string. For code, your template body must contain a code placeholder in the format `{####}` .", "EmailMessage": "The template for email messages that Amazon Cognito sends to your users. You can set an `EmailMessage` template only if the value of [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` . When your [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` , your user pool sends email messages with your own Amazon SES configuration.", "EmailMessageByLink": "The email message template for sending a confirmation link to the user. You can set an `EmailMessageByLink` template only if the value of [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` . When your [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` , your user pool sends email messages with your own Amazon SES configuration.", "EmailSubject": "The subject line for the email message template. You can set an `EmailSubject` template only if the value of [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` . When your [EmailSendingAccount](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_EmailConfigurationType.html#CognitoUserPools-Type-EmailConfigurationType-EmailSendingAccount) is `DEVELOPER` , your user pool sends email messages with your own Amazon SES configuration.", @@ -7449,7 +8178,7 @@ "IdTokenValidity": "The ID token time limit. After this limit expires, your user can't use their ID token. To specify the time unit for `IdTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `IdTokenValidity` as `10` and `TokenValidityUnits` as `hours` , your user can authenticate their session with their ID token for 10 hours.\n\nThe default time unit for `IdTokenValidity` in an API request is hours.", "LogoutURLs": "A list of allowed logout URLs for the IdPs.", "PreventUserExistenceErrors": "Use this setting to choose which errors and responses are returned by Cognito APIs during authentication, account confirmation, and password recovery when the user does not exist in the user pool. When set to `ENABLED` and the user does not exist, authentication returns an error indicating either the username or password was incorrect, and account confirmation and password recovery return a response indicating a code was sent to a simulated destination. 
When set to `LEGACY` , those APIs will return a `UserNotFoundException` exception if the user does not exist in the user pool.", - "ReadAttributes": "The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", + "ReadAttributes": "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", "RefreshTokenValidity": "The refresh token time limit. After this limit expires, your user can't use their refresh token. To specify the time unit for `RefreshTokenValidity` as `seconds` , `minutes` , `hours` , or `days` , set a `TokenValidityUnits` value in your API request.\n\nFor example, when you set `RefreshTokenValidity` as `10` and `TokenValidityUnits` as `days` , your user can refresh their session and retrieve new access and ID tokens for 10 days.\n\nThe default time unit for `RefreshTokenValidity` in an API request is days. You can't set `RefreshTokenValidity` to 0. If you do, Amazon Cognito overrides the value with the default value of 30 days.", "SupportedIdentityProviders": "A list of provider names for the identity providers (IdPs) that are supported on this client. The following are supported: `COGNITO` , `Facebook` , `Google` , `SignInWithApple` , and `LoginWithAmazon` . You can also specify the names that you configured for the SAML and OIDC IdPs in your user pool, for example `MySAMLIdP` or `MyOIDCIdP` .", "TokenValidityUnits": "The units in which the validity times are represented. The default unit for RefreshToken is days, and default for ID and access tokens are hours.", @@ -7458,15 +8187,15 @@ }, "AWS::Cognito::UserPoolClient AnalyticsConfiguration": { "ApplicationArn": "The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use the Amazon Pinpoint project for integration with the chosen user pool client. 
Amazon Cognito publishes events to the Amazon Pinpoint project that the app ARN declares.", - "ApplicationId": "The application ID for an Amazon Pinpoint application.", - "ExternalId": "The external ID.", - "RoleArn": "The ARN of an AWS Identity and Access Management role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.", + "ApplicationId": "Your Amazon Pinpoint project ID.", + "ExternalId": "The [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) of the role that Amazon Cognito assumes to send analytics data to Amazon Pinpoint.", + "RoleArn": "The ARN of an AWS Identity and Access Management role that has the permissions required for Amazon Cognito to publish events to Amazon Pinpoint analytics.", "UserDataShared": "If `UserDataShared` is `true` , Amazon Cognito includes user data in the events that it publishes to Amazon Pinpoint analytics." }, "AWS::Cognito::UserPoolClient TokenValidityUnits": { - "AccessToken": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is hours. `AccessTokenValidity` duration can range from five minutes to one day.", - "IdToken": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is hours. `IdTokenValidity` duration can range from five minutes to one day.", - "RefreshToken": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is days. `RefreshTokenValidity` duration can range from 60 minutes to 10 years." + "AccessToken": "A time unit for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is `hours` . `AccessTokenValidity` duration can range from five minutes to one day.", + "IdToken": "A time unit for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is `hours` . `IdTokenValidity` duration can range from five minutes to one day.", + "RefreshToken": "A time unit for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is `days` . `RefreshTokenValidity` duration can range from 60 minutes to 10 years." }, "AWS::Cognito::UserPoolDomain": { "CustomDomainConfig": "The configuration for a custom domain that hosts the sign-up and sign-in pages for your application. Use this object to specify an SSL certificate that is managed by ACM.", @@ -7498,52 +8227,52 @@ "UserPoolId": "The user pool ID for the user pool." }, "AWS::Cognito::UserPoolResourceServer ResourceServerScopeType": { - "ScopeDescription": "A description of the scope.", - "ScopeName": "The name of the scope." + "ScopeDescription": "A friendly description of a custom scope.", + "ScopeName": "The name of the scope. Amazon Cognito renders custom scopes in the format `resourceServerIdentifier/ScopeName` . For example, if this parameter is `exampleScope` in the resource server with the identifier `exampleResourceServer` , you request and receive the scope `exampleResourceServer/exampleScope` ." 
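The `resourceServerIdentifier/ScopeName` rendering described above is easiest to see in a template. A minimal sketch that reuses the documented example values `exampleResourceServer` and `exampleScope`; the `ExampleUserPool` reference is a hypothetical pool assumed to be defined elsewhere in the same template:

```yaml
Resources:
  ExampleResourceServer:
    Type: AWS::Cognito::UserPoolResourceServer
    Properties:
      UserPoolId: !Ref ExampleUserPool   # hypothetical user pool resource
      Identifier: exampleResourceServer
      Name: Example resource server
      Scopes:
        - ScopeName: exampleScope
          ScopeDescription: A friendly description of the custom scope
```

An app client can then request and receive the scope `exampleResourceServer/exampleScope`.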
}, "AWS::Cognito::UserPoolRiskConfigurationAttachment": { - "AccountTakeoverRiskConfiguration": "The account takeover risk configuration object, including the `NotifyConfiguration` object and `Actions` to take if there is an account takeover.", + "AccountTakeoverRiskConfiguration": "The settings for automated responses and notification templates for adaptive authentication with advanced security features.", "ClientId": "The app client ID. You can specify the risk configuration for a single client (with a specific ClientId) or for all clients (by setting the ClientId to `ALL` ).", - "CompromisedCredentialsRiskConfiguration": "The compromised credentials risk configuration object, including the `EventFilter` and the `EventAction` .", - "RiskExceptionConfiguration": "The configuration to override the risk decision.", - "UserPoolId": "The user pool ID." + "CompromisedCredentialsRiskConfiguration": "Settings for compromised-credentials actions and authentication types with advanced security features in full-function `ENFORCED` mode.", + "RiskExceptionConfiguration": "Exceptions to the risk evaluation configuration, including always-allow and always-block IP address ranges.", + "UserPoolId": "The ID of the user pool that has the risk configuration applied." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment AccountTakeoverActionType": { - "EventAction": "The action to take in response to the account takeover action. Valid values are as follows:\n\n- `BLOCK` Choosing this action will block the request.\n- `MFA_IF_CONFIGURED` Present an MFA challenge if user has configured it, else allow the request.\n- `MFA_REQUIRED` Present an MFA challenge if user has configured it, else block the request.\n- `NO_ACTION` Allow the user to sign in.", - "Notify": "Flag specifying whether to send a notification." + "EventAction": "The action to take for the attempted account takeover action for the associated risk level. Valid values are as follows:\n\n- `BLOCK` : Block the request.\n- `MFA_IF_CONFIGURED` : Present an MFA challenge if possible. MFA is possible if the user pool has active MFA methods that the user can set up. For example, if the user pool only supports SMS message MFA but the user doesn't have a phone number attribute, MFA setup isn't possible. If MFA setup isn't possible, allow the request.\n- `MFA_REQUIRED` : Present an MFA challenge if possible. Block the request if a user hasn't set up MFA. To sign in with required MFA, users must have an email address or phone number attribute, or a registered TOTP factor.\n- `NO_ACTION` : Take no action. Permit sign-in.", + "Notify": "Determines whether Amazon Cognito sends a user a notification message when your user pools assesses a user's session at the associated risk level." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment AccountTakeoverActionsType": { - "HighAction": "Action to take for a high risk.", - "LowAction": "Action to take for a low risk.", - "MediumAction": "Action to take for a medium risk." + "HighAction": "The action that you assign to a high-risk assessment by advanced security features.", + "LowAction": "The action that you assign to a low-risk assessment by advanced security features.", + "MediumAction": "The action that you assign to a medium-risk assessment by advanced security features." 
}, "AWS::Cognito::UserPoolRiskConfigurationAttachment AccountTakeoverRiskConfigurationType": { - "Actions": "Account takeover risk configuration actions.", - "NotifyConfiguration": "The notify configuration used to construct email notifications." + "Actions": "A list of account-takeover actions for each level of risk that Amazon Cognito might assess with advanced security features.", + "NotifyConfiguration": "The settings for composing and sending an email message when advanced security features assesses a risk level with adaptive authentication. When you choose to notify users in `AccountTakeoverRiskConfiguration` , Amazon Cognito sends an email message using the method and template that you set with this data type." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment CompromisedCredentialsActionsType": { - "EventAction": "The event action." + "EventAction": "The action that Amazon Cognito takes when it detects compromised credentials." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment CompromisedCredentialsRiskConfigurationType": { - "Actions": "The compromised credentials risk configuration actions.", - "EventFilter": "Perform the action for these events. The default is to perform all events if no event filter is specified." + "Actions": "Settings for the actions that you want your user pool to take when Amazon Cognito detects compromised credentials.", + "EventFilter": "Settings for the sign-in activity where you want to configure compromised-credentials actions. Defaults to all events." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment NotifyConfigurationType": { - "BlockEmail": "Email template used when a detected risk event is blocked.", - "From": "The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", - "MfaEmail": "The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk.", - "NoActionEmail": "The email template used when a detected risk event is allowed.", - "ReplyTo": "The destination to which the receiver of an email should reply to.", + "BlockEmail": "The template for the email message that your user pool sends when a detected risk event is blocked.", + "From": "The email address that sends the email message. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", + "MfaEmail": "The template for the email message that your user pool sends when MFA is challenged in response to a detected risk.", + "NoActionEmail": "The template for the email message that your user pool sends when no action is taken in response to a detected risk.", + "ReplyTo": "The reply-to email address of an email template.", "SourceArn": "The Amazon Resource Name (ARN) of the identity that is associated with the sending authorization policy. This identity permits Amazon Cognito to send for the email address specified in the `From` parameter." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment NotifyEmailType": { - "HtmlBody": "The email HTML body.", - "Subject": "The email subject.", - "TextBody": "The email text body." + "HtmlBody": "The body of an email notification formatted in HTML. Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively.", + "Subject": "The subject of the threat protection email notification.", + "TextBody": "The body of an email notification formatted in plaintext. 
Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively." }, "AWS::Cognito::UserPoolRiskConfigurationAttachment RiskExceptionConfigurationType": { - "BlockedIPRangeList": "Overrides the risk decision to always block the pre-authentication requests. The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.", - "SkippedIPRangeList": "Risk detection isn't performed on the IP addresses in this range list. The IP range is in CIDR notation." + "BlockedIPRangeList": "An always-block IP address list. Overrides the risk decision and always blocks authentication requests. This parameter is displayed and set in CIDR notation.", + "SkippedIPRangeList": "An always-allow IP address list. Risk detection isn't performed on the IP addresses in this range list. This parameter is displayed and set in CIDR notation." }, "AWS::Cognito::UserPoolUICustomizationAttachment": { "CSS": "The CSS values in the UI customization.", @@ -7868,6 +8597,20 @@ "Key": "One part of a key-value pair that make up a tag. A key is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that make up a tag. A value acts as a descriptor within a tag category (key)." }, + "AWS::Connect::AgentStatus": { + "Description": "The description of the agent status.", + "DisplayOrder": "The display order of the agent status.", + "InstanceArn": "The Amazon Resource Name (ARN) of the instance.", + "Name": "The name of the agent status.", + "ResetOrderNumber": "A number indicating the reset order of the agent status.", + "State": "The state of the agent status.", + "Tags": "The tags used to organize, track, or control access for this resource. For example, { \"Tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.", + "Type": "The type of agent status." + }, + "AWS::Connect::AgentStatus Tag": { + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" + }, "AWS::Connect::ApprovedOrigin": { "InstanceId": "The Amazon Resource Name (ARN) of the instance.\n\n*Minimum* : `1`\n\n*Maximum* : `100`", "Origin": "Domain name to be added to the allow-list of the instance.\n\n*Maximum* : `267`" @@ -8361,6 +9104,42 @@ "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -", "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -" }, + "AWS::Connect::UserHierarchyStructure": { + "InstanceArn": "The Amazon Resource Name (ARN) of the instance.", + "UserHierarchyStructure": "Contains information about a hierarchy structure." 
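A minimal sketch of the new `AWS::Connect::UserHierarchyStructure` resource documented above; the instance ARN is a placeholder, and only `Name` is set per level here since the level ARNs and identifiers are assigned by the service:

```yaml
Resources:
  ExampleHierarchyStructure:
    Type: AWS::Connect::UserHierarchyStructure
    Properties:
      InstanceArn: arn:aws:connect:us-east-1:111122223333:instance/EXAMPLE-ID   # placeholder ARN
      UserHierarchyStructure:
        LevelOne:
          Name: Region
        LevelTwo:
          Name: Site
        LevelThree:
          Name: Team
```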
+ }, + "AWS::Connect::UserHierarchyStructure LevelFive": { + "HierarchyLevelArn": "The Amazon Resource Name (ARN) of the hierarchy level.", + "HierarchyLevelId": "The identifier of the hierarchy level.", + "Name": "The name of the hierarchy level." + }, + "AWS::Connect::UserHierarchyStructure LevelFour": { + "HierarchyLevelArn": "The Amazon Resource Name (ARN) of the hierarchy level.", + "HierarchyLevelId": "The identifier of the hierarchy level.", + "Name": "The name of the hierarchy level." + }, + "AWS::Connect::UserHierarchyStructure LevelOne": { + "HierarchyLevelArn": "The Amazon Resource Name (ARN) of the hierarchy level.", + "HierarchyLevelId": "The identifier of the hierarchy level.", + "Name": "The name of the hierarchy level." + }, + "AWS::Connect::UserHierarchyStructure LevelThree": { + "HierarchyLevelArn": "The Amazon Resource Name (ARN) of the hierarchy level.", + "HierarchyLevelId": "The identifier of the hierarchy level.", + "Name": "The name of the hierarchy level." + }, + "AWS::Connect::UserHierarchyStructure LevelTwo": { + "HierarchyLevelArn": "The Amazon Resource Name (ARN) of the hierarchy level.", + "HierarchyLevelId": "The identifier of the hierarchy level.", + "Name": "The name of the hierarchy level." + }, + "AWS::Connect::UserHierarchyStructure UserHierarchyStructure": { + "LevelFive": "The update for level five.", + "LevelFour": "The update for level four.", + "LevelOne": "The update for level one.", + "LevelThree": "The update for level three.", + "LevelTwo": "The update for level two." + }, "AWS::Connect::View": { "Actions": "A list of actions possible from the view.", "Description": "The description of the view.", @@ -9834,12 +10613,12 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationEFS": { - "AccessPointArn": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", - "Ec2Config": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", - "EfsFilesystemArn": "Specifies the ARN for the Amazon EFS file system.", - "FileSystemAccessRoleArn": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", - "InTransitEncryption": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", - "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location).
By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "AccessPointArn": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", + "Ec2Config": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", + "EfsFilesystemArn": "Specifies the ARN for your Amazon EFS file system.", + "FileSystemAccessRoleArn": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", + "InTransitEncryption": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "Subdirectory": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "Tags": "Specifies the key-value pair that represents a tag that you want to add to the resource. The value can be an empty string. This value helps you manage, filter, and search for your resources. We recommend that you create a name tag for your location." }, "AWS::DataSync::LocationEFS Ec2Config": { @@ -9924,7 +10703,7 @@ "Value": "The value for an AWS resource tag." }, "AWS::DataSync::LocationHDFS": { - "AgentArns": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "AgentArns": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "AuthenticationType": "", "BlockSize": "The size of data blocks to write into the HDFS cluster. The block size must be a multiple of 512 bytes. The default block size is 128 mebibytes (MiB).", "KerberosKeytab": "The Kerberos key table (keytab) that contains mappings between the defined Kerberos principal and the encrypted keys. Provide the base64-encoded file text. If `KERBEROS` is specified for `AuthType` , this value is required.", @@ -9952,7 +10731,7 @@ }, "AWS::DataSync::LocationNFS": { "MountOptions": "Specifies the options that DataSync can use to mount your NFS file server.", - "OnPremConfig": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. 
For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "OnPremConfig": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "ServerHostname": "Specifies the Domain Name System (DNS) name or IP version 4 address of the NFS file server that your DataSync agent connects to.", "Subdirectory": "Specifies the export path in your NFS file server that you want DataSync to mount.\n\nThis path (or a subdirectory of the path) is where DataSync transfers data to or from. For information on configuring an export for DataSync, see [Accessing NFS file servers](https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#accessing-nfs) .", "Tags": "Specifies labels that help you categorize, filter, and search for your AWS resources. We recommend creating at least a name tag for your location." @@ -9961,7 +10740,7 @@ "Version": "Specifies the NFS version that you want DataSync to use when mounting your NFS share. If the server refuses to use the version specified, the task fails.\n\nYou can specify the following options:\n\n- `AUTOMATIC` (default): DataSync chooses NFS version 4.1.\n- `NFS3` : Stateless protocol version that allows for asynchronous writes on the server.\n- `NFSv4_0` : Stateful, firewall-friendly protocol version that supports delegations and pseudo file systems.\n- `NFSv4_1` : Stateful protocol version that supports sessions, directory delegations, and parallel data processing. NFS version 4.1 also includes all features available in version 4.0.\n\n> DataSync currently only supports NFS version 3 with Amazon FSx for NetApp ONTAP locations." }, "AWS::DataSync::LocationNFS OnPremConfig": { - "AgentArns": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location." + "AgentArns": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) ." }, "AWS::DataSync::LocationNFS Tag": { "Key": "The key for an AWS resource tag.", @@ -9969,7 +10748,7 @@ }, "AWS::DataSync::LocationObjectStorage": { "AccessKey": "Specifies the access key (for example, a user name) if credentials are required to authenticate with the object storage server.", - "AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "AgentArns": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "BucketName": "Specifies the name of the object storage bucket involved in the transfer.", "SecretKey": "Specifies the secret key (for example, a password) if credentials are required to authenticate with the object storage server.", "ServerCertificate": "Specifies a certificate chain for DataSync to authenticate with your object storage system if the system uses a private or self-signed certificate authority (CA). 
You must specify a single `.pem` file with a full certificate chain (for example, `file:///home/user/.ssh/object_storage_certificates.pem` ).\n\nThe certificate chain might include:\n\n- The object storage system's certificate\n- All intermediate certificates (if there are any)\n- The root certificate of the signing CA\n\nYou can concatenate your certificates into a `.pem` file (which can be up to 32768 bytes before base64 encoding). The following example `cat` command creates an `object_storage_certificates.pem` file that includes three certificates:\n\n`cat object_server_certificate.pem intermediate_certificate.pem ca_root_certificate.pem > object_storage_certificates.pem`\n\nTo use this parameter, configure `ServerProtocol` to `HTTPS` .", @@ -10115,7 +10894,7 @@ "Subdirectory": "Specifies a bucket prefix for your report." }, "AWS::DataSync::Task TaskSchedule": { - "ScheduleExpression": "Specifies your task schedule by using a cron expression in UTC time. For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "ScheduleExpression": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "Status": "Specifies whether to enable or disable your task schedule. Your schedule is enabled by default, but there can be situations where you need to disable it. For example, you might need to perform maintenance on a storage system before you can begin a recurring DataSync transfer.\n\nDataSync might disable your schedule automatically if your task fails repeatedly with the same error. For more information, see the [*DataSync User Guide*](https://docs.aws.amazon.com/datasync/latest/userguide/task-scheduling.html#pause-task-schedule) ." }, "AWS::DataSync::Task Transferred": { @@ -10207,7 +10986,10 @@ "AWS::DataZone::Environment": { "Description": "The description of the environment.", "DomainIdentifier": "The identifier of the Amazon DataZone domain in which the environment is created.", + "EnvironmentAccountIdentifier": "The identifier of the AWS account in which an environment exists.", + "EnvironmentAccountRegion": "The AWS Region in which an environment exists.", "EnvironmentProfileIdentifier": "The identifier of the environment profile that is used to create this Amazon DataZone environment.", + "EnvironmentRoleArn": "The ARN of the environment role.", "GlossaryTerms": "The glossary terms that can be used in this Amazon DataZone environment.", "Name": "The name of the Amazon DataZone environment.", "ProjectIdentifier": "The identifier of the Amazon DataZone project in which this environment is created.", @@ -10217,6 +10999,17 @@ "Name": "The name of the environment parameter.", "Value": "The value of the environment parameter." 
}, + "AWS::DataZone::EnvironmentActions": { + "Description": "", + "DomainIdentifier": "The Amazon DataZone domain ID of the environment action.", + "EnvironmentIdentifier": "The environment ID of the environment action.", + "Identifier": "The ID of the environment action.", + "Name": "The name of the environment action.", + "Parameters": "" + }, + "AWS::DataZone::EnvironmentActions AwsConsoleLinkParameters": { + "Uri": "The URI of the console link specified as part of the environment action." + }, "AWS::DataZone::EnvironmentBlueprintConfiguration": { "DomainIdentifier": "The identifier of the Amazon DataZone domain in which an environment blueprint exists.", "EnabledRegions": "The enabled AWS Regions specified in a blueprint configuration.", @@ -10299,7 +11092,7 @@ }, "AWS::Deadline::Farm": { "Description": "A description of the farm that helps identify what the farm is used for.", - "DisplayName": "The display name of the farm.", + "DisplayName": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "KmsKeyArn": "The ARN for the KMS key.", "Tags": "The tags to add to your farm. Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings." }, @@ -10310,7 +11103,7 @@ "AWS::Deadline::Fleet": { "Configuration": "The configuration details for the fleet.", "Description": "A description that helps identify what the fleet is used for.", - "DisplayName": "The display name of the fleet summary to update.", + "DisplayName": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "MaxWorkerCount": "The maximum number of workers specified in the fleet.", "MinWorkerCount": "The minimum number of workers in the fleet.", @@ -10408,7 +11201,7 @@ "ProductId": "The product ID." }, "AWS::Deadline::Monitor": { - "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.", + "DisplayName": "The name of the monitor that displays on the Deadline Cloud console.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "IdentityCenterInstanceArn": "The Amazon Resource Name (ARN) of the IAM Identity Center instance responsible for authenticating monitor users.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role for the monitor. Users of the monitor use this role to access Deadline Cloud resources.", "Subdomain": "The subdomain used for the monitor URL. The full URL of the monitor is subdomain.Region.deadlinecloud.amazonaws.com." @@ -10417,7 +11210,7 @@ "AllowedStorageProfileIds": "The identifiers of the storage profiles that this queue can use to share assets between workers using different operating systems.", "DefaultBudgetAction": "The default action taken on a queue summary if a budget wasn't configured.", "Description": "A description of the queue that helps identify what the queue is used for.", - "DisplayName": "The display name of the queue summary to update.", + "DisplayName": "The display name of the queue summary to update.\n\n> This field can store any content. 
Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The farm ID.", "JobAttachmentSettings": "The job attachment settings. These are the Amazon S3 bucket name and the Amazon S3 prefix.", "JobRunAsUser": "Identifies the user for a job.", @@ -10459,7 +11252,7 @@ "QueueId": "The queue ID." }, "AWS::Deadline::StorageProfile": { - "DisplayName": "The display name of the storage profile summary to update.", + "DisplayName": "The display name of the storage profile summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "FarmId": "The unique identifier of the farm that contains the storage profile.", "FileSystemLocations": "Operating system specific file system path to the storage location.", "OsFamily": "The operating system (OS) family." @@ -10634,13 +11427,13 @@ "AWS::DocDB::DBCluster": { "AvailabilityZones": "A list of Amazon EC2 Availability Zones that instances in the cluster can be created in.", "BackupRetentionPeriod": "The number of days for which automated backups are retained. You must specify a minimum value of 1.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35.", - "CopyTagsToSnapshot": "", + "CopyTagsToSnapshot": "Set to `true` to copy all tags from the source cluster snapshot to the target cluster snapshot, and otherwise `false` . The default is `false` .", "DBClusterIdentifier": "The cluster identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- The first character must be a letter.\n- Cannot end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster`", "DBClusterParameterGroupName": "The name of the cluster parameter group to associate with this cluster.", "DBSubnetGroupName": "A subnet group to associate with this cluster.\n\nConstraints: Must match the name of an existing `DBSubnetGroup` . Must not be default.\n\nExample: `mySubnetgroup`", "DeletionProtection": "Protects clusters from being accidentally deleted. If enabled, the cluster cannot be deleted unless it is modified and `DeletionProtection` is disabled.", "EnableCloudwatchLogsExports": "The list of log types that need to be enabled for exporting to Amazon CloudWatch Logs. You can enable audit logs or profiler logs. For more information, see [Auditing Amazon DocumentDB Events](https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html) and [Profiling Amazon DocumentDB Operations](https://docs.aws.amazon.com/documentdb/latest/developerguide/profiling.html) .", - "EngineVersion": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.", + "EngineVersion": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.\n\nChanging the `EngineVersion` will start an in-place engine version upgrade. Note that in-place engine version upgrade will cause downtime in the cluster. 
See [Amazon DocumentDB in-place major version upgrade](https://docs.aws.amazon.com/documentdb/latest/developerguide/docdb-mvu.html) before starting an in-place engine version upgrade.", "KmsKeyId": "The AWS KMS key identifier for an encrypted cluster.\n\nThe AWS KMS key identifier is the Amazon Resource Name (ARN) for the AWS KMS encryption key. If you are creating a cluster using the same AWS account that owns the AWS KMS encryption key that is used to encrypt the new cluster, you can use the AWS KMS key alias instead of the ARN for the AWS KMS encryption key.\n\nIf an encryption key is not specified in `KmsKeyId` :\n\n- If the `StorageEncrypted` parameter is `true` , Amazon DocumentDB uses your default encryption key.\n\nAWS KMS creates the default encryption key for your AWS account . Your AWS account has a different default encryption key for each AWS Regions .", "MasterUserPassword": "The password for the master database user. This password can contain any printable ASCII character except forward slash (/), double quote (\"), or the \"at\" symbol (@).\n\nConstraints: Must contain from 8 to 100 characters.", "MasterUsername": "The name of the master user for the cluster.\n\nConstraints:\n\n- Must be from 1 to 63 letters or numbers.\n- The first character must be a letter.\n- Cannot be a reserved word for the chosen database engine.", @@ -10651,7 +11444,7 @@ "RestoreType": "The type of restore to be performed. You can specify one of the following values:\n\n- `full-copy` - The new DB cluster is restored as a full copy of the source DB cluster.\n- `copy-on-write` - The new DB cluster is restored as a clone of the source DB cluster.\n\nConstraints: You can't specify `copy-on-write` if the engine version of the source DB cluster is earlier than 1.11.\n\nIf you don't specify a `RestoreType` value, then the new DB cluster is restored as a full copy of the source DB cluster.", "SnapshotIdentifier": "The identifier for the snapshot or cluster snapshot to restore from.\n\nYou can use either the name or the Amazon Resource Name (ARN) to specify a cluster snapshot. However, you can use only the ARN to specify a snapshot.\n\nConstraints:\n\n- Must match the identifier of an existing snapshot.", "SourceDBClusterIdentifier": "The identifier of the source cluster from which to restore.\n\nConstraints:\n\n- Must match the identifier of an existing `DBCluster` .", - "StorageEncrypted": "Specifies whether the cluster is encrypted.", + "StorageEncrypted": "Specifies whether the cluster is encrypted.\n\nIf you specify `SourceDBClusterIdentifier` or `SnapshotIdentifier` and don\u2019t specify `StorageEncrypted` , the encryption property is inherited from the source cluster or snapshot (unless `KMSKeyId` is specified, in which case the restored cluster will be encrypted with that KMS key). If the source is encrypted and `StorageEncrypted` is specified to be true, the restored cluster will be encrypted (if you want to use a different KMS key, specify the `KMSKeyId` property as well). If the source is unencrypted and `StorageEncrypted` is specified to be true, then the `KMSKeyId` property must be specified. 
If the source is encrypted, don\u2019t specify `StorageEncrypted` to be false as opting out of encryption is not allowed.", "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Amazon DocumentDB clusters, see Cluster storage configurations in the *Amazon DocumentDB Developer Guide* .\n\nValid values for storage type - `standard | iopt1`\n\nDefault value is `standard`\n\n> When you create a DocumentDB DB cluster with the storage type set to `iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `standard` .", "Tags": "The tags to be assigned to the cluster.", "UseLatestRestorableTime": "A value that is set to `true` to restore the cluster to the latest restorable backup time, and `false` otherwise.\n\nDefault: `false`\n\nConstraints: Cannot be specified if the `RestoreToTime` parameter is provided.", @@ -10675,7 +11468,7 @@ "AWS::DocDB::DBInstance": { "AutoMinorVersionUpgrade": "This parameter does not apply to Amazon DocumentDB. Amazon DocumentDB does not perform minor version upgrades regardless of the value set.\n\nDefault: `false`", "AvailabilityZone": "The Amazon EC2 Availability Zone that the instance is created in.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nExample: `us-east-1d`", - "CACertificateIdentifier": "The CA certificate identifier to use for the DB instance's server certificate.\n\nFor more information, see [Updating Your Amazon DocumentDB TLS Certificates](https://docs.aws.amazon.com/documentdb/latest/developerguide/ca_cert_rotation.html) and [Encrypting Data in Transit](https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.html) in the *Amazon DocumentDB Developer Guide* .", + "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.", "CertificateRotationRestart": "Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate.\n\nBy default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.\n\n> Set this parameter only if you are *not* using SSL/TLS to connect to the DB instance. \n\nIf you are using SSL/TLS to connect to the DB instance, see [Updating Your Amazon DocumentDB TLS Certificates](https://docs.aws.amazon.com/documentdb/latest/developerguide/ca_cert_rotation.html) and [Encrypting Data in Transit](https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.html) in the *Amazon DocumentDB Developer Guide* .", "DBClusterIdentifier": "The identifier of the cluster that the instance will belong to.", "DBInstanceClass": "The compute and memory capacity of the instance; for example, `db.m4.large` . If you change the class of an instance there can be some interruption in the cluster's service.", @@ -10962,8 +11755,8 @@ "Tenancy": "Indicates the tenancy of the Capacity Reservation. A Capacity Reservation can have one of the following tenancy settings:\n\n- `default` - The Capacity Reservation is created on hardware that is shared with other AWS accounts .\n- `dedicated` - The Capacity Reservation is created on single-tenant hardware that is dedicated to a single AWS account ." }, "AWS::EC2::CapacityReservation Tag": { - "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. 
May not begin with `aws:` .", - "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::EC2::CapacityReservation TagSpecification": { "ResourceType": "The type of resource to tag. Specify `capacity-reservation` .", @@ -11148,7 +11941,7 @@ "Placement": "The location where the instance launched, if applicable.", "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf the On-Demand `AllocationStrategy` is set to `prioritized` , EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacity-optimized-prioritized` , EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SubnetId": "The IDs of the subnets in which to launch the instances. Separate multiple subnet IDs using commas (for example, `subnet-1234abcdeexample1, subnet-0987cdef6example2` ). A request of type `instant` can have only one subnet ID.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::EC2Fleet FleetLaunchTemplateSpecificationRequest": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -11315,6 +12108,7 @@ }, "AWS::EC2::IPAM": { "Description": "The description for the IPAM.", + "EnablePrivateGua": "Enable this option to use your own GUA ranges as private IPv6 addresses. This option is disabled by default.", "OperatingRegions": "The operating Regions for an IPAM. Operating Regions are AWS Regions where the IPAM is allowed to manage IP address CIDRs. 
IPAM only discovers and monitors resources in the AWS Regions you select as operating Regions.\n\nFor more information about operating Regions, see [Create an IPAM](https://docs.aws.amazon.com//vpc/latest/ipam/create-ipam.html) in the *Amazon VPC IPAM User Guide* .", "Tags": "The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key `Owner` and the value `TeamA` , specify `tag:Owner` for the filter name and `TeamA` for the filter value.", "Tier": "IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see the [VPC IPAM product pricing page](https://docs.aws.amazon.com//vpc/pricing/) ." @@ -11342,7 +12136,7 @@ "AwsService": "Limits which service in AWS that the pool can be used in. \"ec2\", for example, allows users to use space for Elastic IP addresses and VPCs.", "Description": "The description of the IPAM pool.", "IpamScopeId": "The ID of the scope in which you would like to create the IPAM pool.", - "Locale": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "Locale": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "ProvisionedCidrs": "Information about the CIDRs provisioned to an IPAM pool.", "PublicIpSource": "The IP address source for pools in the public scope. Only used for provisioning IP address CIDRs to pools in the public scope. Default is `BYOIP` . For more information, see [Create IPv6 pools](https://docs.aws.amazon.com//vpc/latest/ipam/intro-create-ipv6-pools.html) in the *Amazon VPC IPAM User Guide* . By default, you can add only one Amazon-provided IPv6 CIDR block to a top-level IPv6 pool. For information on increasing the default limit, see [Quotas for your IPAM](https://docs.aws.amazon.com//vpc/latest/ipam/quotas-ipam.html) in the *Amazon VPC IPAM User Guide* .", "PubliclyAdvertisable": "Determines if a pool is publicly advertisable. This option is not available for pools with AddressFamily set to `ipv4` .", @@ -11519,8 +12313,8 @@ "DocumentName": "The name of an SSM document to associate with the instance." }, "AWS::EC2::Instance State": { - "Code": "", - "Name": "" + "Code": "The state of the instance as a 16-bit unsigned integer.\n\nThe high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. 
These numerical values are used for internal purposes and should be ignored.\n\nThe low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.\n\nThe valid values for instance-state-code will all be in the range of the low byte and they are:\n\n- `0` : `pending`\n- `16` : `running`\n- `32` : `shutting-down`\n- `48` : `terminated`\n- `64` : `stopping`\n- `80` : `stopped`\n\nYou can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.", + "Name": "The current state of the instance." }, "AWS::EC2::Instance Tag": { "Key": "The tag key.", @@ -11562,7 +12356,7 @@ "AWS::EC2::LaunchTemplate": { "LaunchTemplateData": "The information for the launch template.", "LaunchTemplateName": "A name for the launch template.", - "TagSpecifications": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "TagSpecifications": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for resources that are created during instance launch, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "VersionDescription": "A description for the first version of the launch template." }, "AWS::EC2::LaunchTemplate AcceleratorCount": { @@ -11686,7 +12480,7 @@ "EnclaveOptions": "Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see [What is AWS Nitro Enclaves?](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the *AWS Nitro Enclaves User Guide* .\n\nYou can't enable AWS Nitro Enclaves and hibernation on the same instance.", "HibernationOptions": "Indicates whether an instance is enabled for hibernation. This parameter is valid only if the instance meets the [hibernation prerequisites](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html) . For more information, see [Hibernate your Amazon EC2 instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) in the *Amazon EC2 User Guide* .", "IamInstanceProfile": "The name or Amazon Resource Name (ARN) of an IAM instance profile.", - "ImageId": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "ImageId": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "InstanceInitiatedShutdownBehavior": "Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using the operating system command for system shutdown).\n\nDefault: `stop`", "InstanceMarketOptions": "The market (purchasing) option for the instances.", "InstanceRequirements": "The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance types with these attributes.\n\nYou must specify `VCpuCount` and `MemoryMiB` . All other attributes are optional. Any unspecified optional attribute is set to its default.\n\nWhen you specify multiple attributes, you get instance types that satisfy all of the specified attributes. If you specify multiple values for an attribute, you get instance types that satisfy any of the specified values.\n\nTo limit the list of instance types from which Amazon EC2 can identify matching instance types, you can use one of the following parameters, but not both in the same request:\n\n- `AllowedInstanceTypes` - The instance types to include in the list. All other instance types are ignored, even if they match your specified attributes.\n- `ExcludedInstanceTypes` - The instance types to exclude from the list, even if they match your specified attributes.\n\n> If you specify `InstanceRequirements` , you can't specify `InstanceType` .\n> \n> Attribute-based instance type selection is only supported when using Auto Scaling groups, EC2 Fleet, and Spot Fleet to launch instances. If you plan to use the launch template in the [launch instance wizard](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-instance-wizard.html) , or with the [RunInstances](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html) API or [AWS::EC2::Instance](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-instance.html) AWS CloudFormation resource, you can't specify `InstanceRequirements` . \n\nFor more information, see [Attribute-based instance type selection for EC2 Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-attribute-based-instance-type-selection.html) , [Attribute-based instance type selection for Spot Fleet](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-attribute-based-instance-type-selection.html) , and [Spot placement score](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-placement-score.html) in the *Amazon EC2 User Guide* .", @@ -11703,7 +12497,7 @@ "RamDiskId": "The ID of the RAM disk.\n\n> We recommend that you use PV-GRUB instead of kernels and RAM disks. For more information, see [User provided kernels](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/UserProvidedkernels.html) in the *Amazon EC2 User Guide* .", "SecurityGroupIds": "The IDs of the security groups. 
You can specify the IDs of existing security groups and references to resources created by the stack template.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead.", "SecurityGroups": "The names of the security groups. For a nondefault VPC, you must use security group IDs instead.\n\nIf you specify a network interface, you must specify any security groups as part of the network interface instead of using this parameter.", - "TagSpecifications": "The tags to apply to the resources that are created during instance launch.\n\nTo tag a resource after it has been created, see [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) .\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "TagSpecifications": "The tags to apply to resources that are created during instance launch.\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", "UserData": "The user data to make available to the instance. You must provide base64-encoded text. User data is limited to 16 KB. For more information, see [Run commands on your Amazon EC2 instance at launch](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) in the *Amazon EC2 User Guide* .\n\nIf you are creating the launch template for use with AWS Batch , the user data must be provided in the [MIME multi-part archive format](https://docs.aws.amazon.com/https://cloudinit.readthedocs.io/en/latest/topics/format.html#mime-multi-part-archive) . For more information, see [Amazon EC2 user data in launch templates](https://docs.aws.amazon.com/batch/latest/userguide/launch-templates.html) in the *AWS Batch User Guide* ." }, "AWS::EC2::LaunchTemplate LaunchTemplateElasticInferenceAccelerator": { @@ -11851,7 +12645,7 @@ "ConnectivityType": "Indicates whether the NAT gateway supports public or private connectivity. The default is public connectivity.", "MaxDrainDurationSeconds": "The maximum amount of time to wait (in seconds) before forcibly releasing the IP addresses if connections are still in progress. Default value is 350 seconds.", "PrivateIpAddress": "The private IPv4 address to assign to the NAT gateway. If you don't provide an address, a private IPv4 address will be automatically assigned.", - "SecondaryAllocationIds": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating) in the *Amazon VPC User Guide* .", + "SecondaryAllocationIds": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-working-with.html) in the *Amazon VPC User Guide* .", "SecondaryPrivateIpAddressCount": "[Private NAT gateway only] The number of secondary private IPv4 addresses you want to assign to the NAT gateway. 
For more information about secondary addresses, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating) in the *Amazon Virtual Private Cloud User Guide* .\n\n`SecondaryPrivateIpAddressCount` and `SecondaryPrivateIpAddresses` cannot be set at the same time.", "SecondaryPrivateIpAddresses": "Secondary private IPv4 addresses. For more information about secondary addresses, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating) in the *Amazon Virtual Private Cloud User Guide* .\n\n`SecondaryPrivateIpAddressCount` and `SecondaryPrivateIpAddresses` cannot be set at the same time.", "SubnetId": "The ID of the subnet in which the NAT gateway is located.", @@ -11983,7 +12777,7 @@ "TransitGatewayId": "The ID of a transit gateway.", "VpcPeeringConnectionId": "The ID of a VPC peering connection.", "destinationCidr": "The destination IPv4 address, in CIDR notation.", - "destinationPrefixListId": "The prefix of the AWS service .", + "destinationPrefixListId": "The prefix of the AWS service.", "egressOnlyInternetGatewayId": "The ID of an egress-only internet gateway.", "gatewayId": "The ID of the gateway, such as an internet gateway or virtual private gateway.", "instanceId": "The ID of the instance, such as a NAT instance." @@ -12188,8 +12982,8 @@ }, "AWS::EC2::PrefixList": { "AddressFamily": "The IP address type.\n\nValid Values: `IPv4` | `IPv6`", - "Entries": "One or more entries for the prefix list.", - "MaxEntries": "The maximum number of entries for the prefix list.", + "Entries": "The entries for the prefix list.", + "MaxEntries": "The maximum number of entries for the prefix list. This property is required when you create a prefix list.", "PrefixListName": "A name for the prefix list.\n\nConstraints: Up to 255 characters in length. The name cannot start with `com.amazonaws` .", "Tags": "The tags for the prefix list." }, @@ -12387,7 +13181,7 @@ "Priority": "The priority for the launch template override. The highest priority is launched first.\n\nIf `OnDemandAllocationStrategy` is set to `prioritized` , Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacityOptimizedPrioritized` , Spot Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SpotPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n\n> If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.", "SubnetId": "The ID of the subnet in which to launch the instances.", - "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). 
However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet LoadBalancersConfig": { "ClassicLoadBalancersConfig": "The Classic Load Balancers.", @@ -12435,7 +13229,7 @@ "SubnetId": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".\n\nIf you specify a network interface, you must specify any subnets as part of the network interface instead of using this parameter.", "TagSpecifications": "The tags to apply during creation.", "UserData": "The base64-encoded user data that instances use when starting up. User data is limited to 16 KB.", - "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1." + "WeightedCapacity": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet SpotFleetMonitoring": { "Enabled": "Enables monitoring for the instance.\n\nDefault: `false`" @@ -12500,12 +13294,11 @@ "AvailabilityZone": "The Availability Zone of the subnet.\n\nIf you update this property, you must also update the `CidrBlock` property.", "AvailabilityZoneId": "The AZ ID of the subnet.", "CidrBlock": "The IPv4 CIDR block assigned to the subnet.\n\nIf you update this property, we create a new subnet, and then delete the existing one.", - "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. 
For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "EnableDns64": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "EnableLniAtDeviceIndex": "Indicates the device position for local network interfaces in this subnet. For example, `1` indicates local network interfaces in this subnet are the secondary network interface (eth1).", "Ipv4IpamPoolId": "An IPv4 IPAM pool ID for the subnet.", "Ipv4NetmaskLength": "An IPv4 netmask length for the subnet.", "Ipv6CidrBlock": "The IPv6 CIDR block.\n\nIf you specify `AssignIpv6AddressOnCreation` , you must also specify an IPv6 CIDR block.", - "Ipv6CidrBlocks": "The IPv6 network ranges for the subnet, in CIDR notation.", "Ipv6IpamPoolId": "An IPv6 IPAM pool ID for the subnet.", "Ipv6Native": "Indicates whether this is an IPv6 only subnet. For more information, see [Subnet basics](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#subnet-basics) in the *Amazon Virtual Private Cloud User Guide* .", "Ipv6NetmaskLength": "An IPv6 netmask length for the subnet.", @@ -12561,8 +13354,8 @@ "TrafficMirrorFilterId": "The ID of the filter that this rule is associated with." }, "AWS::EC2::TrafficMirrorFilterRule Tag": { - "Key": "The key of the tag.\n\nConstraints: Tag keys are case-sensitive and accept a maximum of 127 Unicode characters. May not begin with `aws:` .", - "Value": "The value of the tag.\n\nConstraints: Tag values are case-sensitive and accept a maximum of 256 Unicode characters." + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::EC2::TrafficMirrorFilterRule TrafficMirrorPortRange": { "FromPort": "The start of the Traffic Mirror port range. This applies to the TCP and UDP protocols.", @@ -12603,6 +13396,7 @@ "DnsSupport": "Enable or disable DNS support. Enabled by default.", "MulticastSupport": "Indicates whether multicast is enabled on the transit gateway", "PropagationDefaultRouteTableId": "The ID of the default propagation route table.", + "SecurityGroupReferencingSupport": "Enables you to reference a security group across VPCs attached to a transit gateway (TGW). Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). 
This option is disabled by default and there are no additional costs to use this feature.\n\nFor important information about this feature, see [Create a transit gateway](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html#create-tgw) in the *AWS Transit Gateway Guide* .", "Tags": "The tags for the transit gateway.", "TransitGatewayCidrBlocks": "The transit gateway CIDR blocks.", "VpnEcmpSupport": "Enable or disable Equal Cost Multipath Protocol support. Enabled by default." @@ -12718,7 +13512,8 @@ "AWS::EC2::TransitGatewayVpcAttachment Options": { "ApplianceModeSupport": "Enable or disable appliance mode support. The default is `disable` .", "DnsSupport": "Enable or disable DNS support. The default is `disable` .", - "Ipv6Support": "Enable or disable IPv6 support. The default is `disable` ." + "Ipv6Support": "Enable or disable IPv6 support. The default is `disable` .", + "SecurityGroupReferencingSupport": "Enables you to reference a security group across VPCs attached to a transit gateway (TGW). Use this option to simplify security group management and control of instance-to-instance traffic across VPCs that are connected by transit gateway. You can also use this option to migrate from VPC peering (which was the only option that supported security group referencing) to transit gateways (which now also support security group referencing). This option is disabled by default and there are no additional costs to use this feature.\n\nFor important information about this feature, see [Create a transit gateway](https://docs.aws.amazon.com/vpc/latest/tgw/tgw-transit-gateways.html#create-tgw) in the *AWS Transit Gateway Guide* ." }, "AWS::EC2::TransitGatewayVpcAttachment Tag": { "Key": "The tag key.", @@ -12753,7 +13548,7 @@ "VpcId": "The ID of the VPC." }, "AWS::EC2::VPCEndpoint": { - "PolicyDocument": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation converts YAML policies to JSON format before calling the API to create or modify the VPC endpoint.", + "PolicyDocument": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. For example, if you have a JSON policy, you can convert it to YAML before including it in the YAML template, and AWS CloudFormation converts the policy to JSON format before calling the API actions for AWS PrivateLink . Alternatively, you can include the JSON directly in the YAML, as shown in the following `Properties` section:\n\n`Properties: VpcEndpointType: 'Interface' ServiceName: !Sub 'com.amazonaws.${AWS::Region}.logs' PolicyDocument: '{ \"Version\":\"2012-10-17\", \"Statement\": [{ \"Effect\":\"Allow\", \"Principal\":\"*\", \"Action\":[\"logs:Describe*\",\"logs:Get*\",\"logs:List*\",\"logs:FilterLogEvents\"], \"Resource\":\"*\" }] }'`", "PrivateDnsEnabled": "Indicate whether to associate a private hosted zone with the specified VPC. 
The private hosted zone contains a record set for the default public DNS name for the service for the Region (for example, `kinesis.us-east-1.amazonaws.com` ), which resolves to the private IP addresses of the endpoint network interfaces in the VPC. This enables you to make requests to the default public DNS name for the service instead of the public DNS names that are automatically generated by the VPC endpoint service.\n\nTo use a private hosted zone, you must set the following VPC attributes to `true` : `enableDnsHostnames` and `enableDnsSupport` .\n\nThis property is supported only for interface endpoints.\n\nDefault: `false`", "RouteTableIds": "The IDs of the route tables. Routing is supported only for gateway endpoints.", "SecurityGroupIds": "The IDs of the security groups to associate with the endpoint network interfaces. If this parameter is not specified, we use the default security group for the VPC. Security groups are supported only for interface endpoints.", @@ -12798,9 +13593,17 @@ }, "AWS::EC2::VPNConnection": { "CustomerGatewayId": "The ID of the customer gateway at your end of the VPN connection.", + "EnableAcceleration": "Indicate whether to enable acceleration for the VPN connection.\n\nDefault: `false`", + "LocalIpv4NetworkCidr": "The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection.\n\nDefault: `0.0.0.0/0`", + "LocalIpv6NetworkCidr": "The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection.\n\nDefault: `::/0`", + "OutsideIpAddressType": "The type of IPv4 address assigned to the outside interface of the customer gateway device.\n\nValid values: `PrivateIpv4` | `PublicIpv4`\n\nDefault: `PublicIpv4`", + "RemoteIpv4NetworkCidr": "The IPv4 CIDR on the AWS side of the VPN connection.\n\nDefault: `0.0.0.0/0`", + "RemoteIpv6NetworkCidr": "The IPv6 CIDR on the AWS side of the VPN connection.\n\nDefault: `::/0`", "StaticRoutesOnly": "Indicates whether the VPN connection uses static routes only. Static routes must be used for devices that don't support BGP.\n\nIf you are creating a VPN connection for a device that does not support Border Gateway Protocol (BGP), you must specify `true` .", "Tags": "Any tags assigned to the VPN connection.", "TransitGatewayId": "The ID of the transit gateway associated with the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", + "TransportTransitGatewayAttachmentId": "The transit gateway attachment ID to use for the VPN tunnel.\n\nRequired if `OutsideIpAddressType` is set to `PrivateIpv4` .", + "TunnelInsideIpVersion": "Indicate whether the VPN tunnels process IPv4 or IPv6 traffic.\n\nDefault: `ipv4`", "Type": "The type of VPN connection.", "VpnGatewayId": "The ID of the virtual private gateway at the AWS side of the VPN connection.\n\nYou must specify either `TransitGatewayId` or `VpnGatewayId` , but not both.", "VpnTunnelOptionsSpecifications": "The tunnel options for the VPN connection." @@ -13031,7 +13834,7 @@ "Tags": "An array of key-value pairs to apply to this resource." }, "AWS::ECR::Repository EncryptionConfiguration": { - "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. 
For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." }, "AWS::ECR::Repository ImageScanningConfiguration": { @@ -13046,17 +13849,18 @@ "Value": "A `value` acts as a descriptor within a tag category (key)." }, "AWS::ECR::RepositoryCreationTemplate": { - "AppliedFor": "", - "Description": "", - "EncryptionConfiguration": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", - "ImageTagMutability": "", - "LifecyclePolicy": "", - "Prefix": "", - "RepositoryPolicy": "", - "ResourceTags": "The tags attached to the resource." 
+ "AppliedFor": "A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION", + "CustomRoleArn": "The ARN of the role to be assumed by Amazon ECR. Amazon ECR will assume your supplied role when the customRoleArn is specified. When this field isn't specified, Amazon ECR will use the service-linked role for the repository creation template.", + "Description": "The description associated with the repository creation template.", + "EncryptionConfiguration": "The encryption configuration associated with the repository creation template.", + "ImageTagMutability": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used which will allow image tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository will be immutable which will prevent them from being overwritten.", + "LifecyclePolicy": "The lifecycle policy to use for repositories created using the template.", + "Prefix": "The repository namespace prefix associated with the repository creation template.", + "RepositoryPolicy": "he repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", + "ResourceTags": "The metadata to apply to the repository to help you categorize and organize. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters." }, "AWS::ECR::RepositoryCreationTemplate EncryptionConfiguration": { - "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "EncryptionType": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . 
Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "KmsKey": "If you use the `KMS` encryption type, specify the AWS KMS key to use for encryption. The alias, key ID, or full ARN of the AWS KMS key can be specified. The key must exist in the same Region as the repository. If no key is specified, the default AWS managed AWS KMS key for Amazon ECR will be used." }, "AWS::ECR::RepositoryCreationTemplate Tag": { @@ -13076,7 +13880,7 @@ }, "AWS::ECS::CapacityProvider ManagedScaling": { "InstanceWarmupPeriod": "The period of time, in seconds, after a newly launched Amazon EC2 instance can contribute to CloudWatch metrics for the Auto Scaling group. If this parameter is omitted, the default value of `300` seconds is used.", - "MaximumScalingStepSize": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", + "MaximumScalingStepSize": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "MinimumScalingStepSize": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `1` is used.\n\nWhen additional capacity is required, Amazon ECS will scale up the minimum scaling step size even if the actual demand is less than the minimum scaling step size.\n\nIf you use a capacity provider with an Auto Scaling group configured with more than one Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum scaling step size value and will ignore both the maximum scaling step size as well as the capacity demand.", "Status": "Determines whether to use managed scaling for the capacity provider.", "TargetCapacity": "The target capacity utilization as a percentage for the capacity provider. The specified value must be greater than `0` and less than or equal to `100` . For example, if you want the capacity provider to maintain 10% spare capacity, then that means the utilization is 90%, so use a `targetCapacity` of `90` . The default value of `100` percent results in the Amazon EC2 instances in your Auto Scaling group being completely used." @@ -13172,8 +13976,8 @@ }, "AWS::ECS::Service AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", - "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", - "Subnets": "The IDs of the subnets associated with the task or service.
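Tying the `KMS_DSSE` and creation-template entries above together, a hedged sketch of an `AWS::ECR::RepositoryCreationTemplate` that applies dual-layer encryption to pull-through-cache repositories; the prefix and key ARN are placeholders:

```yaml
Resources:
  # Hypothetical creation template; prefix and key ARN are placeholders.
  CacheRepositoryTemplate:
    Type: AWS::ECR::RepositoryCreationTemplate
    Properties:
      Prefix: ecr-public                 # namespace prefix matched at repository creation
      AppliedFor:
        - PULL_THROUGH_CACHE
      Description: Dual-layer encrypted cache repositories
      ImageTagMutability: IMMUTABLE
      EncryptionConfiguration:
        EncryptionType: KMS_DSSE         # two layers of server-side encryption via AWS KMS
        KmsKey: arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555
```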
There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC." + "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::Service CapacityProviderStrategyItem": { "Base": "The *base* value designates how many tasks, at a minimum, to run on the specified capacity provider. Only one capacity provider in a capacity provider strategy can have a *base* defined. If no value is specified, the default value of `0` is used.", @@ -13192,8 +13996,8 @@ "AWS::ECS::Service DeploymentConfiguration": { "Alarms": "Information about the CloudWatch alarms.", "DeploymentCircuitBreaker": "> The deployment circuit breaker can only be used for services using the rolling update ( `ECS` ) deployment type. \n\nThe *deployment circuit breaker* determines whether a service deployment will fail if the service can't reach a steady state. If you use the deployment circuit breaker, a service deployment will transition to a failed state and stop launching new tasks. If you use the rollback option, when a service deployment fails, the service is rolled back to the last deployment that completed successfully. For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*", - "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", - "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). 
This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." + "MaximumPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). 
The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "MinimumHealthyPercent": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. 
The default `minimumHealthyPercent` value for a service using the `DAEMON` service scheduler is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service." }, "AWS::ECS::Service DeploymentController": { "Type": "The deployment controller type to use. There are three deployment controller types available:\n\n- **ECS** - The rolling update ( `ECS` ) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the [DeploymentConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeploymentConfiguration.html) .\n- **CODE_DEPLOY** - The blue/green ( `CODE_DEPLOY` ) deployment type uses the blue/green deployment model powered by AWS CodeDeploy , which allows you to verify a new deployment of a service before sending production traffic to it.\n- **EXTERNAL** - The external ( `EXTERNAL` ) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service." @@ -13211,7 +14015,7 @@ }, "AWS::ECS::Service LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included.
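The deployment percentages above read most easily with concrete numbers: with `desiredCount: 4`, `minimumHealthyPercent: 50` keeps at least ceil(4 x 0.50) = 2 tasks RUNNING, and `maximumPercent: 200` allows at most floor(4 x 2.00) = 8 tasks during the rollout. A minimal sketch, assuming a rolling-update service whose cluster and task definition references are placeholders:

```yaml
Resources:
  # Hypothetical rolling-update service; Cluster and TaskDef are placeholder references.
  WebService:
    Type: AWS::ECS::Service
    Properties:
      Cluster: !Ref Cluster
      TaskDefinition: !Ref TaskDef
      DesiredCount: 4
      DeploymentController:
        Type: ECS                        # rolling update
      DeploymentConfiguration:
        MinimumHealthyPercent: 50        # ceil(4 * 0.50) = 2 tasks must stay RUNNING
        MaximumPercent: 200              # floor(4 * 2.00) = 8 tasks allowed mid-deployment
        DeploymentCircuitBreaker:
          Enable: true                   # fail the deployment if a steady state is never reached
          Rollback: true                 # roll back to the last deployment that completed successfully
```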
However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries.
The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container health check failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve a potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination.
When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ." }, "AWS::ECS::Service NetworkConfiguration": { @@ -13235,7 +14039,7 @@ }, "AWS::ECS::Service ServiceConnectConfiguration": { "Enabled": "Specifies whether to use Service Connect with this service.", - "LogConfiguration": "The log configuration for the container. This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. 
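The expanded `Options` entry above covers many flags; a minimal container-level sketch wiring the common `awslogs` ones together (the group name, Region, prefix, and buffer size are placeholders):

```yaml
# Hypothetical log configuration for one container definition.
LogConfiguration:
  LogDriver: awslogs
  Options:
    awslogs-group: /ecs/web              # must already exist unless awslogs-create-group is true
    awslogs-create-group: "true"         # requires logs:CreateLogGroup in the IAM policy
    awslogs-region: us-east-1
    awslogs-stream-prefix: web           # streams become web/<container-name>/<task-id>
    mode: non-blocking                   # favors availability; buffered logs may be lost
    max-buffer-size: 25m                 # in-memory buffer used only in non-blocking mode
```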
For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "LogConfiguration": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", "Namespace": "The namespace name or full Amazon Resource Name (ARN) of the AWS Cloud Map namespace for use with Service Connect. The namespace must be in the same AWS Region as the Amazon ECS service and cluster. The type of namespace doesn't affect Service Connect. For more information about AWS Cloud Map , see [Working with Services](https://docs.aws.amazon.com/cloud-map/latest/dg/working-with-services.html) in the *AWS Cloud Map Developer Guide* .", "Services": "The list of Service Connect service objects. These are names and aliases (also known as endpoints) that are used by other Amazon ECS services to connect to this service.\n\nThis field is not required for a \"client\" Amazon ECS service that's a member of a namespace only to connect to other services within the namespace. An example of this would be a frontend application that accepts incoming requests from either a load balancer that's attached to the service or by other means.\n\nAn object selects a port from the task definition, assigns a name for the AWS Cloud Map service, and a list of aliases (endpoints) and ports for client applications to refer to this service." 
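A hedged sketch of the `ServiceConnectConfiguration` shape documented above, as it would sit on an `AWS::ECS::Service`; the namespace, names, and ports are placeholders:

```yaml
# Hypothetical Service Connect block; names, ports, and namespace are placeholders.
ServiceConnectConfiguration:
  Enabled: true
  Namespace: internal.example            # AWS Cloud Map namespace name or ARN
  Services:
    - PortName: http                     # must match a named port mapping in the task definition
      DiscoveryName: web
      ClientAliases:
        - Port: 80
          DnsName: web.internal.example  # endpoint other services in the namespace use
  LogConfiguration:
    LogDriver: awslogs
    Options:
      awslogs-group: /ecs/service-connect
      awslogs-region: us-east-1
      awslogs-stream-prefix: sc
```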
}, @@ -13292,16 +14096,16 @@ "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants the Amazon ECS container agent permission to make AWS API calls on your behalf. For information about the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .", "Family": "The name of a family that this task definition is registered to. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.\n\nA family groups multiple versions of a task definition. Amazon ECS gives the first task definition that you registered to a family a revision number of 1. Amazon ECS gives sequential revision numbers to each task definition that you add.\n\n> To use revision numbers when you update a task definition, specify this property. If you don't specify a value, AWS CloudFormation generates a new task definition each time that you update it.", "InferenceAccelerators": "The Elastic Inference accelerators to use for the containers in the task.", - "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "IpcMode": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance.
If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "Memory": "The amount (in MiB) of memory used by the task.\n\nIf your task runs on Amazon EC2 instances, you must specify either a task-level memory value or a container-level memory value. This field is optional and any value can be used. If a task-level memory value is specified, the container-level memory value is optional. For more information regarding container-level memory and memory reservation, see [ContainerDefinition](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) .\n\nIf your task runs on AWS Fargate , this field is required. You must use one of the following values. The value you choose determines your range of valid values for the `cpu` parameter.\n\n- 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available `cpu` values: 256 (.25 vCPU)\n- 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available `cpu` values: 512 (.5 vCPU)\n- 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available `cpu` values: 1024 (1 vCPU)\n- Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available `cpu` values: 2048 (2 vCPU)\n- Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available `cpu` values: 4096 (4 vCPU)\n- Between 16 GB and 60 GB in 4 GB increments - Available `cpu` values: 8192 (8 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.\n- Between 32 GB and 120 GB in 8 GB increments - Available `cpu` values: 16384 (16 vCPU)\n\nThis option requires Linux platform `1.4.0` or later.", - "NetworkMode": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity.
The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", - "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "NetworkMode": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `<default>` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the task's containers do not have external connectivity.
The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", + "PidMode": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "PlacementConstraints": "An array of placement constraint objects to use for tasks.\n\n> This parameter isn't supported for tasks run on AWS Fargate .", "ProxyConfiguration": "The configuration details for the App Mesh proxy.\n\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the `ecs-init` package to use a proxy configuration. If your container instances are launched from the Amazon ECS optimized AMI version `20190301` or later, they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .", "RequiresCompatibilities": "The task launch types the task definition was validated against. The valid values are `EC2` , `FARGATE` , and `EXTERNAL` . For more information, see [Amazon ECS launch types](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) in the *Amazon Elastic Container Service Developer Guide* .", "RuntimePlatform": "The operating system that your task definitions run on.
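Pulling the namespace-related settings above into one place, a minimal Fargate task-definition sketch; `IpcMode` is omitted because the notes above say it isn't supported on Fargate, and the image and sizing are placeholders:

```yaml
Resources:
  # Hypothetical Fargate task definition; image and sizing are placeholders.
  AppTaskDef:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: app
      RequiresCompatibilities:
        - FARGATE
      NetworkMode: awsvpc                # required on Fargate; one ENI per task
      PidMode: task                      # containers share one process namespace (Linux, platform 1.4.0+)
      Cpu: "512"
      Memory: "1024"                     # a documented pairing for 512 CPU units
      ContainerDefinitions:
        - Name: app
          Image: public.ecr.aws/docker/library/busybox:latest
          Essential: true
```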
A platform family is specified only for tasks using the Fargate launch type.", "Tags": "The metadata that you apply to the task definition to help you categorize and organize them. Each tag consists of a key and an optional value. You define both of them.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", - "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .", + "TaskRoleArn": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.", "Volumes": "The list of data volume definitions for the task. For more information, see [Using data volumes in tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> The `host` and `sourcePath` parameters aren't supported for tasks run on AWS Fargate ." }, "AWS::ECS::TaskDefinition AuthorizationConfig": { @@ -13309,46 +14113,47 @@ "IAM": "Determines whether to use the Amazon ECS task role defined in a task definition when mounting the Amazon EFS file system. If it is turned on, transit encryption must be turned on in the `EFSVolumeConfiguration` . If this parameter is omitted, the default value of `DISABLED` is used. For more information, see [Using Amazon EFS access points](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html#efs-volume-accesspoints) in the *Amazon Elastic Container Service Developer Guide* ."
}, "AWS::ECS::TaskDefinition ContainerDefinition": { - "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.", - "Cpu": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. 
CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "Command": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. If there are multiple arguments, each argument is a separated string in the array.", + "Cpu": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota.
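The Linux CPU-share arithmetic above maps onto a container definition like the following sketch: two copies of this task on one single-core (1,024-unit) instance are each guaranteed 512 units under contention, and either can burst toward the full core while the other is idle (the image is a placeholder):

```yaml
# Hypothetical container definition reserving half of a single-core instance.
ContainerDefinitions:
  - Name: worker
    Image: public.ecr.aws/docker/library/busybox:latest
    Cpu: 512          # guaranteed share under contention; may burst when units are unallocated
    Essential: true
```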
Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "CredentialSpecs": "A list of ARNs in SSM or Amazon S3 to a credential spec ( `CredSpec` ) file that configures the container for Active Directory authentication. We recommend that you use this parameter instead of the `dockerSecurityOptions` . The maximum number of ARNs is 1.\n\nThere are two formats for each ARN.\n\n- **credentialspecdomainless:MyARN** - You use `credentialspecdomainless:MyARN` to provide a `CredSpec` with an additional section for a secret in AWS Secrets Manager . You provide the login credentials to the domain in the secret.\n\nEach task that runs on any container instance can join different domains.\n\nYou can use this format without joining the container instance to a domain.\n- **credentialspec:MyARN** - You use `credentialspec:MyARN` to provide a `CredSpec` for a single domain.\n\nYou must join the container instance to the domain before you start any tasks that use this task definition.\n\nIn both formats, replace `MyARN` with the ARN in SSM or Amazon S3.\n\nIf you provide a `credentialspecdomainless:MyARN` , the `credspec` must provide a ARN in AWS Secrets Manager for a secret containing the username, password, and the domain to connect to. For better security, the instance isn't joined to the domain for domainless authentication. Other applications on the instance can't use the domainless credentials. You can use this parameter to run tasks on the same instance, even it the tasks need to join different domains. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) .", "DependsOn": "The dependencies defined for container startup and shutdown. A container can contain multiple dependencies. When a dependency is defined for container startup, for container shutdown it is reversed.\n\nFor tasks using the EC2 launch type, the container instances require at least version 1.26.0 of the container agent to turn on container dependencies. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . 
For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nIf the task definition is used in a blue/green deployment that uses [AWS::CodeDeploy::DeploymentGroup BlueGreenDeploymentConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-bluegreendeploymentconfiguration.html) , the `dependsOn` parameter is not supported. For more information see [Issue #680](https://docs.aws.amazon.com/https://github.com/aws-cloudformation/cloudformation-coverage-roadmap/issues/680) on the on the GitHub website.", - "DisableNetworking": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .\n\n> This parameter is not supported for Windows containers.", - "DnsSearchDomains": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", - "DnsServers": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", - "DockerLabels": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", - "DockerSecurityOptions": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . 
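As an illustration of the startup ordering described for `DependsOn`, here is a sketch with two hypothetical containers, where `app` waits for the `envoy` sidecar to report healthy before starting:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: app                          # hypothetical container name
    Image: public.ecr.aws/docker/library/busybox:latest
    Essential: true
    Memory: 128
    DependsOn:
      - ContainerName: envoy
        Condition: HEALTHY             # start only after the sidecar passes its health check
  - Name: envoy                        # hypothetical sidecar
    Image: public.ecr.aws/docker/library/busybox:latest
    Essential: true
    Memory: 128
    HealthCheck:
      Command: ["CMD-SHELL", "exit 0"] # placeholder check; a real probe would test the proxy
```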
This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", - "EntryPoint": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .", - "Environment": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", - "EnvironmentFiles": "A list of files containing the environment variables to pass to a container. 
This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", + "DisableNetworking": "When this parameter is true, networking is off within the container. This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.", + "DnsSearchDomains": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.", + "DnsServers": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.", + "DockerLabels": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "DockerSecurityOptions": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. 
For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", + "EntryPoint": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.", + "Environment": "The environment variables to pass to a container. This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", + "EnvironmentFiles": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", "Essential": "If the `essential` parameter of a container is marked as `true` , and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the `essential` parameter of a container is marked as `false` , its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.\n\nAll tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see [Application Architecture](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) in the *Amazon Elastic Container Service Developer Guide* .", - "ExtraHosts": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. 
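The `environment`/`environmentFiles` precedence described above might look like the following template fragment; the variable name and S3 ARN are hypothetical:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: app                          # hypothetical container name
    Image: public.ecr.aws/docker/library/busybox:latest
    Environment:                       # maps to Env / --env; wins over environment files
      - Name: LOG_LEVEL
        Value: info
    EnvironmentFiles:                  # maps to --env-file; must point at a .env object
      - Type: s3
        Value: arn:aws:s3:::my-config-bucket/app.env   # hypothetical bucket and key
```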
This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", + "ExtraHosts": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", "FirelensConfiguration": "The FireLens configuration for the container. This is used to specify and configure a log router for container logs. For more information, see [Custom Log Routing](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) in the *Amazon Elastic Container Service Developer Guide* .", - "HealthCheck": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "Hostname": "The hostname to use for your container. This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", - "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . 
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", - "Interactive": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "HealthCheck": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.", + "Hostname": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", + "Image": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. 
However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", + "Interactive": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.", + "Links": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "LinuxParameters": "Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information see [KernelCapabilities](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_KernelCapabilities.html) .\n\n> This parameter is not supported for Windows containers.", - "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.
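The image reference formats listed above, shown as literal values (the key below is not a real property, and the account ID, region, and repository names are illustrative):

```yaml
# Each entry is one valid shape for the Image property.
ImageFormats:
  - ubuntu                                                    # official Docker Hub repository
  - amazon/amazon-ecs-agent                                   # Docker Hub organization/repository
  - quay.io/assemblyline/ubuntu                               # other registry, qualified by domain
  - 123456789012.dkr.ecr.us-east-1.amazonaws.com/app:latest   # Amazon ECR, by tag
```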
\n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "Memory": "The amount (in MiB) of memory to present to the container. If your container attempts to exceed the memory specified here, the container is killed. The total amount of memory reserved for all containers within a task must be lower than the task `memory` value, if one is specified.
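For instance, routing container logs to CloudWatch Logs with the `awslogs` driver might look like the following fragment; the log group, region, and stream prefix are hypothetical:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: app                          # hypothetical container name
    Image: public.ecr.aws/docker/library/busybox:latest
    LogConfiguration:                  # maps to LogConfig / --log-driver
      LogDriver: awslogs
      Options:
        awslogs-group: /ecs/app        # hypothetical log group; must already exist or be created
        awslogs-region: us-east-1
        awslogs-stream-prefix: app
```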
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf using the Fargate launch type, this parameter is optional.\n\nIf using the EC2 launch type, you must specify either a task-level memory value or a container-level memory value. If you specify both a container-level `memory` and `memoryReservation` value, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container, so you should not specify fewer than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container, so you should not specify fewer than 4 MiB of memory for your containers.", - "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. 
So, don't specify less than 4 MiB of memory for your containers.", - "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", - "Name": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "MemoryReservation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "MountPoints": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . 
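The 128/300 MiB burst example above translates directly into a container definition fragment; the container name and image are hypothetical:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: bursty-app                   # hypothetical container name
    Image: public.ecr.aws/docker/library/busybox:latest
    MemoryReservation: 128             # soft limit: reserved from the instance's resources
    Memory: 300                        # hard limit: the container is killed above this
```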
Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "Name": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the docker container create command and the `--name` option to docker run.", "PortMappings": "The list of port mappings for the container. Port mappings allow containers to access ports on the host container instance to send or receive traffic.\n\nFor task definitions that use the `awsvpc` network mode, you should only specify the `containerPort` . The `hostPort` can be left blank or it must be the same value as the `containerPort` .\n\nPort mappings on Windows use the `NetNAT` gateway address rather than `localhost` . There is no loopback for port mappings on Windows, so you cannot access a container's mapped port from the host itself.\n\nThis parameter maps to `PortBindings` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--publish` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . If the network mode of a task definition is set to `none` , then you can't specify port mappings. If the network mode of a task definition is set to `host` , then host ports must either be undefined or they must match the container port in the port mapping.\n\n> After a task reaches the `RUNNING` status, manual and automatic host and container port assignments are visible in the *Network Bindings* section of a container description for a selected task in the Amazon ECS console. The assignments are also visible in the `networkBindings` section [DescribeTasks](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DescribeTasks.html) responses.", - "Privileged": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", - "PseudoTerminal": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. 
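A sketch of the `awsvpc` port-mapping rule described above, where only the container port is given; the values are hypothetical:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: web                          # hypothetical container name
    Image: public.ecr.aws/nginx/nginx:latest
    PortMappings:
      - ContainerPort: 80              # with awsvpc, omit HostPort or set it to the same value
        Protocol: tcp
```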
This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "Privileged": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the docker container create command and the `--privileged` option to docker run.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "PseudoTerminal": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the docker container create command and the `--tty` option to docker run.", + "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker container create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "RepositoryCredentials": "The private repository authentication credentials to use.", "ResourceRequirements": "The type and amount of a resource to assign to a container. The only supported resource is a GPU.", + "RestartPolicy": "The restart policy for a container. When you set up a restart policy, Amazon ECS can restart the container without needing to replace the task. For more information, see [Restart individual containers in Amazon ECS tasks with container restart policies](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/container-restart-policy.html) in the *Amazon Elastic Container Service Developer Guide* .", "Secrets": "The secrets to pass to the container. For more information, see [Specifying Sensitive Data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* .", "StartTimeout": "Time duration (in seconds) to wait before giving up on resolving dependencies for a container. For example, you specify two containers in a task definition with containerA having a dependency on containerB reaching a `COMPLETE` , `SUCCESS` , or `HEALTHY` status. If a `startTimeout` value is specified for containerB and it doesn't reach the desired status within that time then containerA gives up and not start. This results in the task transitioning to a `STOPPED` state.\n\n> When the `ECS_CONTAINER_START_TIMEOUT` container agent configuration variable is used, it's enforced independently from this start timeout value. \n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks using the EC2 launch type, your container instances require at least version `1.26.0` of the container agent to use a container start timeout value. However, we recommend using the latest container agent version.
For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version `1.26.0-1` of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values for Fargate are 2-120 seconds.", - "StopTimeout": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nThe max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter or the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", - "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . 
For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "StopTimeout": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter nor the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable is set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values for Fargate are 2-120 seconds.", + "SystemControls": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "Ulimits": "A list of `ulimits` to set in the container. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Valid naming values are displayed in the [Ulimit](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_Ulimit.html) data type. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> This parameter is not supported for Windows containers.", - "User": "The user to use inside the container.
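Combining the two timeout knobs described above into one hypothetical container definition fragment:

```yaml
ContainerDefinitions:                  # fragment of an AWS::ECS::TaskDefinition
  - Name: worker                       # hypothetical container name
    Image: public.ecr.aws/docker/library/busybox:latest
    StartTimeout: 60                   # seconds to wait for dependencies before giving up
    StopTimeout: 90                    # seconds between SIGTERM and a forced kill
```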
This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", - "VolumesFrom": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", - "WorkingDirectory": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) ." + "User": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "VolumesFrom": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.", + "WorkingDirectory": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run." }, "AWS::ECS::TaskDefinition ContainerDependency": { "Condition": "The dependency condition of the container. The following are the available conditions and their behavior:\n\n- `START` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.\n- `COMPLETE` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.\n- `SUCCESS` - This condition is the same as `COMPLETE` , but it also requires that the container exits with a `zero` status. 
This condition can't be set on an essential container.\n- `HEALTHY` - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.", @@ -13361,9 +14166,9 @@ }, "AWS::ECS::TaskDefinition DockerVolumeConfiguration": { "Autoprovision": "If this value is `true` , the Docker volume is created if it doesn't already exist.\n\n> This field is only used if the `scope` is `shared` .", - "Driver": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", - "DriverOpts": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", - "Labels": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "Driver": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.", + "DriverOpts": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.", + "Labels": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.", "Scope": "The scope for the Docker volume that determines its lifecycle.
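A sketch of a shared, auto-provisioned Docker volume using the properties above; the volume name and label are hypothetical:

```yaml
Volumes:                               # fragment of AWS::ECS::TaskDefinition Properties
  - Name: shared-data                  # hypothetical volume name
    DockerVolumeConfiguration:
      Scope: shared                    # persists after the task stops
      Autoprovision: true              # only valid when Scope is shared
      Driver: local                    # must match the installed driver name
      Labels:
        project: demo                  # hypothetical label
```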
Docker volumes that are scoped to a `task` are automatically provisioned when the task starts and destroyed when the task stops. Docker volumes that are scoped as `shared` persist after the task stops." }, "AWS::ECS::TaskDefinition EFSVolumeConfiguration": { @@ -13381,8 +14186,8 @@ "SizeInGiB": "The total amount, in GiB, of ephemeral storage to set for the task. The minimum supported value is `20` GiB and the maximum supported value is `200` GiB." }, "AWS::ECS::TaskDefinition FSxAuthorizationConfig": { - "CredentialsParameter": "", - "Domain": "" + "CredentialsParameter": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or SSM Parameter Store parameter. The ARN refers to the stored credentials.", + "Domain": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2." }, "AWS::ECS::TaskDefinition FSxWindowsFileServerVolumeConfiguration": { "AuthorizationConfig": "The authorization configuration details for the Amazon FSx for Windows File Server file system.", @@ -13394,7 +14199,7 @@ "Type": "The log router to use. The valid values are `fluentd` or `fluentbit` ." }, "AWS::ECS::TaskDefinition HealthCheck": { - "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .", + "Command": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "Interval": "The time period in seconds between each health check execution. You may specify between 5 and 300 seconds. The default value is 30 seconds.", "Retries": "The number of times to retry a failed health check before the container is considered unhealthy. You may specify between 1 and 10 retries.
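Putting the health-check parameters above together, here is a hypothetical curl-based probe as it would appear inside a container definition:

```yaml
HealthCheck:                           # fragment of a ContainerDefinition
  Command: ["CMD-SHELL", "curl -f http://localhost/ || exit 1"]
  Interval: 30                         # seconds between checks (5-300)
  Timeout: 5                           # seconds to wait for a check to succeed
  Retries: 3                           # failures before the container is unhealthy (1-10)
  StartPeriod: 10                      # grace period before failures count (0-300)
```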
The default value is 3.", "StartPeriod": "The optional grace period to provide containers time to bootstrap before failed health checks count towards the maximum number of retries. You can specify between 0 and 300 seconds. By default, the `startPeriod` is off.\n\n> If a health check succeeds within the `startPeriod` , then the container is considered healthy and any subsequent failures count toward the maximum number of retries.", @@ -13412,8 +14217,8 @@ "DeviceType": "The Elastic Inference accelerator type to use." }, "AWS::ECS::TaskDefinition KernelCapabilities": { - "Add": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", - "Drop": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`" + "Add": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. 
\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "Drop": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`" }, "AWS::ECS::TaskDefinition KeyValuePair": { "Name": "The name of the key-value pair. For environment variables, this is the name of the environment variable.", @@ -13421,16 +14226,16 @@ }, "AWS::ECS::TaskDefinition LinuxParameters": { "Capabilities": "The Linux capabilities for the container that are added to or dropped from the default configuration provided by Docker.\n\n> For tasks that use the Fargate launch type, `capabilities` is supported for all platform versions but the `add` parameter is only supported if using platform version 1.4.0 or later.", - "Devices": "Any host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", - "InitProcessEnabled": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", - "MaxSwap": "The total amount of swap memory (in MiB) a container can use. 
This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", - "SharedMemorySize": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", - "Swappiness": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", - "Tmpfs": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported." + "Devices": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", + "InitProcessEnabled": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "MaxSwap": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. 
If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "SharedMemorySize": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", + "Swappiness": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "Tmpfs": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported." }, "AWS::ECS::TaskDefinition LogConfiguration": { "LogDriver": "The log driver to use for the container.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n\nFor more information about using the `awslogs` log driver, see [Send Amazon ECS logs to CloudWatch](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nFor more information about using the `awsfirelens` log driver, see [Send Amazon ECS logs to an AWS service or AWS Partner](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_firelens.html) .\n\n> If you have a custom driver that isn't listed, you can fork the Amazon ECS container agent project that's [available on GitHub](https://docs.aws.amazon.com/https://github.com/aws/amazon-ecs-agent) and customize it to work with that driver. We encourage you to submit pull requests for changes that you would like to have included. However, we don't currently provide support for running modified copies of this software.", - "Options": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "Options": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. 
Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single Region in CloudWatch Logs so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. That way, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs in order for them to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern.
The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container health check failures.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory before being sent to the log router container. It can help resolve potential log loss issues, because high throughput might result in Docker running out of memory for the buffer.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option.
You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) in the *Amazon Elastic Container Service Developer Guide* ." }, "AWS::ECS::TaskDefinition MountPoint": { @@ -13458,6 +14263,11 @@ "Type": "The type of resource to assign to a container.", "Value": "The value for the specified resource type.\n\nWhen the type is `GPU` , the value is the number of physical `GPUs` the Amazon ECS container agent reserves for the container. The number of GPUs that's reserved for all containers in a task can't exceed the number of available GPUs on the container instance that the task is launched on.\n\nWhen the type is `InferenceAccelerator` , the `value` matches the `deviceName` for an [InferenceAccelerator](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_InferenceAccelerator.html) specified in a task definition." }, + "AWS::ECS::TaskDefinition RestartPolicy": { + "Enabled": "Specifies whether a restart policy is enabled for the container.", + "IgnoredExitCodes": "A list of exit codes that Amazon ECS will ignore and not attempt a restart on. You can specify a maximum of 50 container exit codes. By default, Amazon ECS does not ignore any exit codes.", + "RestartAttemptPeriod": "A period of time (in seconds) that the container must run for before a restart can be attempted. A container can be restarted only once every `restartAttemptPeriod` seconds. If a container isn't able to run for this time period and exits early, it will not be restarted. You can set a minimum `restartAttemptPeriod` of 60 seconds and a maximum `restartAttemptPeriod` of 1800 seconds. By default, a container must run for 300 seconds before it can be restarted." + }, "AWS::ECS::TaskDefinition RuntimePlatform": { "CpuArchitecture": "The CPU architecture.\n\nYou can run your Linux tasks on an ARM-based platform by setting the value to `ARM64` . This option is available for tasks that run on Linux Amazon EC2 instance or Linux containers on Fargate.", "OperatingSystemFamily": "The operating system." @@ -13484,9 +14294,9 @@ "Size": "The maximum size (in MiB) of the tmpfs volume." }, "AWS::ECS::TaskDefinition Ulimit": { - "HardLimit": "The hard limit for the `ulimit` type.", + "HardLimit": "The hard limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` .", "Name": "The `type` of the `ulimit` .", - "SoftLimit": "The soft limit for the `ulimit` type." + "SoftLimit": "The soft limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` ." }, "AWS::ECS::TaskDefinition Volume": { "ConfiguredAtLaunch": "Indicates whether the volume should be configured at launch time. This is used to create Amazon EBS volumes for standalone tasks or tasks created as part of a service. 
Each task definition revision may only have one volume configured at launch in the volume configuration.\n\nTo configure a volume at launch time, use this task definition revision and specify a `volumeConfigurations` object when calling the `CreateService` , `UpdateService` , `RunTask` or `StartTask` APIs.", @@ -13515,8 +14325,8 @@ }, "AWS::ECS::TaskSet AwsVpcConfiguration": { "AssignPublicIp": "Whether the task's elastic network interface receives a public IP address. The default value is `DISABLED` .", - "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", - "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC." + "SecurityGroups": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "Subnets": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC." }, "AWS::ECS::TaskSet LoadBalancer": { "ContainerName": "The name of the container (as it appears in a container definition) to associate with the load balancer.\n\nYou need to specify the container name when configuring the target group for an Amazon ECS load balancer.", @@ -13661,7 +14471,9 @@ "ResourcesVpcConfig": "The VPC configuration that's used by the cluster control plane. Amazon EKS VPC resources have specific requirements to work properly with Kubernetes. For more information, see [Cluster VPC Considerations](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) and [Cluster Security Group Considerations](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) in the *Amazon EKS User Guide* . You must specify at least two subnets. You can specify up to five security groups, but we recommend that you use a dedicated security group for your cluster control plane.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role that provides permissions for the Kubernetes control plane to make calls to AWS API operations on your behalf. For more information, see [Amazon EKS Service IAM Role](https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) in the **Amazon EKS User Guide** .", "Tags": "The metadata that you apply to the cluster to assist with categorization and organization. Each tag consists of a key and an optional value, both of which you define. Cluster tags don't propagate to any other resources associated with the cluster.\n\n> You must have the `eks:TagResource` and `eks:UntagResource` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", - "Version": "The desired Kubernetes version for your cluster. 
If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available." + "UpgradePolicy": "This value indicates if extended support is enabled or disabled for the cluster.\n\n[Learn more about EKS Extended Support in the EKS User Guide.](https://docs.aws.amazon.com/eks/latest/userguide/extended-support-control.html)", + "Version": "The desired Kubernetes version for your cluster. If you don't specify a value here, the default version available in Amazon EKS is used.\n\n> The default version might not be the latest version available.", + "ZonalShiftConfig": "" }, "AWS::EKS::Cluster AccessConfig": { "AuthenticationMode": "The desired authentication mode for the cluster. If you create a cluster by using the EKS API, AWS SDKs, or AWS CloudFormation , the default is `CONFIG_MAP` . If you create the cluster by using the AWS Management Console , the default value is `API_AND_CONFIG_MAP` .", @@ -13707,6 +14519,12 @@ "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::EKS::Cluster UpgradePolicy": { + "SupportType": "" + }, + "AWS::EKS::Cluster ZonalShiftConfig": { + "Enabled": "" + }, "AWS::EKS::FargateProfile": { "ClusterName": "The name of your cluster.", "FargateProfileName": "The name of the Fargate profile.", @@ -13892,12 +14710,17 @@ "InstanceTypeConfigs": "The instance type configurations that define the Amazon EC2 instances in the instance fleet.", "LaunchSpecifications": "The launch specification for the instance fleet.", "Name": "The friendly name of the instance fleet.", + "ResizeSpecifications": "The resize specification for the instance fleet.", "TargetOnDemandCapacity": "The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by `InstanceTypeConfig` . Each instance configuration has a specified `WeightedCapacity` . When an On-Demand instance is provisioned, the `WeightedCapacity` units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a `WeightedCapacity` of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.\n\n> If not specified or set to 0, only Spot instances are provisioned for the instance fleet using `TargetSpotCapacity` . At least one of `TargetSpotCapacity` and `TargetOnDemandCapacity` should be greater than 0. For a master instance fleet, only one of `TargetSpotCapacity` and `TargetOnDemandCapacity` can be specified, and its value must be 1.", "TargetSpotCapacity": "The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by `InstanceTypeConfig` . Each instance configuration has a specified `WeightedCapacity` . When a Spot instance is provisioned, the `WeightedCapacity` units count toward the target capacity. 
Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a `WeightedCapacity` of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.\n\n> If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of `TargetSpotCapacity` and `TargetOnDemandCapacity` should be greater than 0. For a master instance fleet, only one of `TargetSpotCapacity` and `TargetOnDemandCapacity` can be specified, and its value must be 1." }, "AWS::EMR::Cluster InstanceFleetProvisioningSpecifications": { - "OnDemandSpecification": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", - "SpotSpecification": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy." + "OnDemandSpecification": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "SpotSpecification": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior." + }, + "AWS::EMR::Cluster InstanceFleetResizingSpecifications": { + "OnDemandResizeSpecification": "The resize specification for On-Demand Instances in the instance fleet, which contains the allocation strategy, capacity reservation options, and the resize timeout period.", + "SpotResizeSpecification": "The resize specification for Spot Instances in the instance fleet, which contains the allocation strategy and the resize timeout period." }, "AWS::EMR::Cluster InstanceGroupConfig": { "AutoScalingPolicy": "`AutoScalingPolicy` is a subproperty of the [InstanceGroupConfig](https://docs.aws.amazon.com//AWSCloudFormation/latest/UserGuide/aws-properties-emr-cluster-jobflowinstancesconfig-instancegroupconfig.html) property type that specifies the constraints and rules of an automatic scaling policy in Amazon EMR . The automatic scaling policy defines how an instance group dynamically adds and terminates EC2 instances in response to the value of a CloudWatch metric. Only core and task instance groups can use automatic scaling policies. For more information, see [Using Automatic Scaling in Amazon EMR](https://docs.aws.amazon.com//emr/latest/ManagementGuide/emr-automatic-scaling.html) .", @@ -13917,6 +14740,7 @@ "CustomAmiId": "The custom AMI ID to use for the instance type.", "EbsConfiguration": "The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by `InstanceType` .", "InstanceType": "An Amazon EC2 instance type, such as `m3.xlarge` .", + "Priority": "The priority at which Amazon EMR launches the Amazon EC2 instances with this instance type. Priority starts at 0, which is the highest priority. 
Amazon EMR considers the highest priority first.", "WeightedCapacity": "The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `InstanceFleetConfig` . This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified." }, "AWS::EMR::Cluster JobFlowInstancesConfig": { @@ -13958,8 +14782,19 @@ "Key": "The dimension name.", "Value": "The dimension value." }, + "AWS::EMR::Cluster OnDemandCapacityReservationOptions": { + "CapacityReservationPreference": "Indicates the instance's Capacity Reservation preferences. Possible preferences include:\n\n- `open` - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).\n- `none` - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.", + "CapacityReservationResourceGroupArn": "The ARN of the Capacity Reservation resource group in which to run the instance.", + "UsageStrategy": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.\n\nIf you specify `use-capacity-reservations-first` , the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy ( `lowest-price` ) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy ( `lowest-price` ).\n\nIf you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy." + }, "AWS::EMR::Cluster OnDemandProvisioningSpecification": { - "AllocationStrategy": "Specifies the strategy to use in launching On-Demand instance fleets. Available options are `lowest-price` and `prioritized` . `lowest-price` specifies to launch the instances with the lowest price first, and `prioritized` specifies that Amazon EMR should launch the instances with the highest priority first. The default is `lowest-price` ." + "AllocationStrategy": "Specifies the strategy to use in launching On-Demand instance fleets. Available options are `lowest-price` and `prioritized` . `lowest-price` specifies to launch the instances with the lowest price first, and `prioritized` specifies that Amazon EMR should launch the instances with the highest priority first. The default is `lowest-price` .", + "CapacityReservationOptions": "The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy." + }, + "AWS::EMR::Cluster OnDemandResizingSpecification": { + "AllocationStrategy": "Specifies the allocation strategy to use to launch On-Demand instances during a resize. The default is `lowest-price` .", + "CapacityReservationOptions": "", + "TimeoutDurationMinutes": "On-Demand resize timeout in minutes. If On-Demand Instances are not provisioned within this time, the resize workflow stops. The minimum value is 5 minutes, and the maximum value is 10,080 minutes (7 days). The timeout applies to all resize workflows on the Instance Fleet. 
The resize could be triggered by Amazon EMR Managed Scaling or by the customer (via Amazon EMR Console, Amazon EMR CLI modify-instance-fleet or Amazon EMR SDK ModifyInstanceFleet API) or by Amazon EMR due to Amazon EC2 Spot Reclamation." }, "AWS::EMR::Cluster PlacementGroupConfig": { "InstanceRole": "Role of the instance in the cluster.\n\nStarting with Amazon EMR release 5.23.0, the only supported instance role is `MASTER` .", @@ -14000,6 +14835,10 @@ "TimeoutAction": "The action to take when `TargetSpotCapacity` has not been fulfilled when the `TimeoutDurationMinutes` has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND` . SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.", "TimeoutDurationMinutes": "The Spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the `TimeOutAction` is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created." }, + "AWS::EMR::Cluster SpotResizingSpecification": { + "AllocationStrategy": "Specifies the allocation strategy to use to launch Spot instances during a resize. If you run Amazon EMR releases 6.9.0 or higher, the default is `price-capacity-optimized` . If you run Amazon EMR releases 6.8.0 or lower, the default is `capacity-optimized` .", + "TimeoutDurationMinutes": "Spot resize timeout in minutes. If Spot Instances are not provisioned within this time, the resize workflow will stop provisioning of Spot instances. Minimum value is 5 minutes and maximum value is 10,080 minutes (7 days). The timeout applies to all resize workflows on the Instance Fleet. The resize could be triggered by Amazon EMR Managed Scaling or by the customer (via Amazon EMR Console, Amazon EMR CLI modify-instance-fleet or Amazon EMR SDK ModifyInstanceFleet API) or by Amazon EMR due to Amazon EC2 Spot Reclamation." + }, "AWS::EMR::Cluster StepConfig": { "ActionOnFailure": "The action to take when the cluster step fails. Possible values are `CANCEL_AND_WAIT` and `CONTINUE` .", "HadoopJarStep": "The JAR file used for the step.", @@ -14021,6 +14860,7 @@ "InstanceTypeConfigs": "`InstanceTypeConfigs` determine the EC2 instances that Amazon EMR attempts to provision to fulfill On-Demand and Spot target capacities.\n\n> The instance fleet configuration is available only in Amazon EMR versions 4.8.0 and later, excluding 5.0.x versions.", "LaunchSpecifications": "The launch specification for the instance fleet.", "Name": "The friendly name of the instance fleet.", + "ResizeSpecifications": "The resize specification for the instance fleet.", "TargetOnDemandCapacity": "The target capacity of On-Demand units for the instance fleet, which determines how many On-Demand instances to provision. When the instance fleet launches, Amazon EMR tries to provision On-Demand instances as specified by `InstanceTypeConfig` . Each instance configuration has a specified `WeightedCapacity` . When an On-Demand instance is provisioned, the `WeightedCapacity` units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. 
For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a `WeightedCapacity` of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.\n\n> If not specified or set to 0, only Spot instances are provisioned for the instance fleet using `TargetSpotCapacity` . At least one of `TargetSpotCapacity` and `TargetOnDemandCapacity` should be greater than 0. For a master instance fleet, only one of `TargetSpotCapacity` and `TargetOnDemandCapacity` can be specified, and its value must be 1.", "TargetSpotCapacity": "The target capacity of Spot units for the instance fleet, which determines how many Spot instances to provision. When the instance fleet launches, Amazon EMR tries to provision Spot instances as specified by `InstanceTypeConfig` . Each instance configuration has a specified `WeightedCapacity` . When a Spot instance is provisioned, the `WeightedCapacity` units count toward the target capacity. Amazon EMR provisions instances until the target capacity is totally fulfilled, even if this results in an overage. For example, if there are 2 units remaining to fulfill capacity, and Amazon EMR can only provision an instance with a `WeightedCapacity` of 5 units, the instance is provisioned, and the target capacity is exceeded by 3 units.\n\n> If not specified or set to 0, only On-Demand instances are provisioned for the instance fleet. At least one of `TargetSpotCapacity` and `TargetOnDemandCapacity` should be greater than 0. For a master instance fleet, only one of `TargetSpotCapacity` and `TargetOnDemandCapacity` can be specified, and its value must be 1." }, @@ -14038,8 +14878,12 @@ "EbsOptimized": "Indicates whether an Amazon EBS volume is EBS-optimized." }, "AWS::EMR::InstanceFleetConfig InstanceFleetProvisioningSpecifications": { - "OnDemandSpecification": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", - "SpotSpecification": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy." + "OnDemandSpecification": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "SpotSpecification": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior." + }, + "AWS::EMR::InstanceFleetConfig InstanceFleetResizingSpecifications": { + "OnDemandResizeSpecification": "The resize specification for On-Demand Instances in the instance fleet, which contains the allocation strategy, capacity reservation options, and the resize timeout period.", + "SpotResizeSpecification": "The resize specification for Spot Instances in the instance fleet, which contains the allocation strategy and the resize timeout period." 
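To make the new resize-specification properties documented above concrete, here is a minimal, hypothetical CloudFormation YAML sketch of an `AWS::EMR::InstanceFleetConfig` that sets `ResizeSpecifications`; the logical IDs, instance type, and numeric values are illustrative assumptions, not taken from this change.

```yaml
# Hypothetical sketch only: logical IDs and values are assumptions.
TaskInstanceFleet:
  Type: AWS::EMR::InstanceFleetConfig
  Properties:
    ClusterId: !Ref EmrCluster        # assumes an AWS::EMR::Cluster elsewhere in the template
    InstanceFleetType: TASK
    TargetOnDemandCapacity: 2
    TargetSpotCapacity: 2
    InstanceTypeConfigs:
      - InstanceType: m5.xlarge
        Priority: 0                   # 0 is the highest priority, per the Priority docs above
        WeightedCapacity: 1
    ResizeSpecifications:
      OnDemandResizeSpecification:
        AllocationStrategy: lowest-price              # documented default
        TimeoutDurationMinutes: 60                    # allowed range: 5 to 10,080 minutes
      SpotResizeSpecification:
        AllocationStrategy: price-capacity-optimized  # documented default on EMR 6.9.0+
        TimeoutDurationMinutes: 60
```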
}, "AWS::EMR::InstanceFleetConfig InstanceTypeConfig": { "BidPrice": "The bid price for each Amazon EC2 Spot Instance type as defined by `InstanceType` . Expressed in USD. If neither `BidPrice` nor `BidPriceAsPercentageOfOnDemandPrice` is provided, `BidPriceAsPercentageOfOnDemandPrice` defaults to 100%.", @@ -14048,10 +14892,22 @@ "CustomAmiId": "The custom AMI ID to use for the instance type.", "EbsConfiguration": "The configuration of Amazon Elastic Block Store (Amazon EBS) attached to each instance as defined by `InstanceType` .", "InstanceType": "An Amazon EC2 instance type, such as `m3.xlarge` .", + "Priority": "The priority at which Amazon EMR launches the Amazon EC2 instances with this instance type. Priority starts at 0, which is the highest priority. Amazon EMR considers the highest priority first.", "WeightedCapacity": "The number of units that a provisioned instance of this type provides toward fulfilling the target capacities defined in `InstanceFleetConfig` . This value is 1 for a master instance fleet, and must be 1 or greater for core and task instance fleets. Defaults to 1 if not specified." }, + "AWS::EMR::InstanceFleetConfig OnDemandCapacityReservationOptions": { + "CapacityReservationPreference": "Indicates the instance's Capacity Reservation preferences. Possible preferences include:\n\n- `open` - The instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone).\n- `none` - The instance avoids running in a Capacity Reservation even if one is available. The instance runs as an On-Demand Instance.", + "CapacityReservationResourceGroupArn": "The ARN of the Capacity Reservation resource group in which to run the instance.", + "UsageStrategy": "Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity.\n\nIf you specify `use-capacity-reservations-first` , the fleet uses unused Capacity Reservations to fulfill On-Demand capacity up to the target On-Demand capacity. If multiple instance pools have unused Capacity Reservations, the On-Demand allocation strategy ( `lowest-price` ) is applied. If the number of unused Capacity Reservations is less than the On-Demand target capacity, the remaining On-Demand target capacity is launched according to the On-Demand allocation strategy ( `lowest-price` ).\n\nIf you do not specify a value, the fleet fulfills the On-Demand capacity according to the chosen On-Demand allocation strategy." + }, "AWS::EMR::InstanceFleetConfig OnDemandProvisioningSpecification": { - "AllocationStrategy": "Specifies the strategy to use in launching On-Demand instance fleets. Available options are `lowest-price` and `prioritized` . `lowest-price` specifies to launch the instances with the lowest price first, and `prioritized` specifies that Amazon EMR should launch the instances with the highest priority first. The default is `lowest-price` ." + "AllocationStrategy": "Specifies the strategy to use in launching On-Demand instance fleets. Available options are `lowest-price` and `prioritized` . `lowest-price` specifies to launch the instances with the lowest price first, and `prioritized` specifies that Amazon EMR should launch the instances with the highest priority first. The default is `lowest-price` .", + "CapacityReservationOptions": "The launch specification for On-Demand instances in the instance fleet, which determines the allocation strategy." 
+ }, + "AWS::EMR::InstanceFleetConfig OnDemandResizingSpecification": { + "AllocationStrategy": "Specifies the allocation strategy to use to launch On-Demand instances during a resize. The default is `lowest-price` .", + "CapacityReservationOptions": "", + "TimeoutDurationMinutes": "On-Demand resize timeout in minutes. If On-Demand Instances are not provisioned within this time, the resize workflow stops. The minimum value is 5 minutes, and the maximum value is 10,080 minutes (7 days). The timeout applies to all resize workflows on the Instance Fleet. The resize could be triggered by Amazon EMR Managed Scaling or by the customer (via Amazon EMR Console, Amazon EMR CLI modify-instance-fleet or Amazon EMR SDK ModifyInstanceFleet API) or by Amazon EMR due to Amazon EC2 Spot Reclamation." }, "AWS::EMR::InstanceFleetConfig SpotProvisioningSpecification": { "AllocationStrategy": "Specifies one of the following strategies to launch Spot Instance fleets: `capacity-optimized` , `price-capacity-optimized` , `lowest-price` , or `diversified` , and `capacity-optimized-prioritized` . For more information on the provisioning strategies, see [Allocation strategies for Spot Instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-fleet-allocation-strategy.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\n> When you launch a Spot Instance fleet with the old console, it automatically launches with the `capacity-optimized` strategy. You can't change the allocation strategy from the old console.", @@ -14059,6 +14915,10 @@ "TimeoutAction": "The action to take when `TargetSpotCapacity` has not been fulfilled when the `TimeoutDurationMinutes` has expired; that is, when all Spot Instances could not be provisioned within the Spot provisioning timeout. Valid values are `TERMINATE_CLUSTER` and `SWITCH_TO_ON_DEMAND` . SWITCH_TO_ON_DEMAND specifies that if no Spot Instances are available, On-Demand Instances should be provisioned to fulfill any remaining Spot capacity.", "TimeoutDurationMinutes": "The Spot provisioning timeout period in minutes. If Spot Instances are not provisioned within this time period, the `TimeOutAction` is taken. Minimum value is 5 and maximum value is 1440. The timeout applies only during initial provisioning, when the cluster is first created." }, + "AWS::EMR::InstanceFleetConfig SpotResizingSpecification": { + "AllocationStrategy": "Specifies the allocation strategy to use to launch Spot instances during a resize. If you run Amazon EMR releases 6.9.0 or higher, the default is `price-capacity-optimized` . If you run Amazon EMR releases 6.8.0 or lower, the default is `capacity-optimized` .", + "TimeoutDurationMinutes": "Spot resize timeout in minutes. If Spot Instances are not provisioned within this time, the resize workflow will stop provisioning of Spot instances. Minimum value is 5 minutes and maximum value is 10,080 minutes (7 days). The timeout applies to all resize workflows on the Instance Fleet. The resize could be triggered by Amazon EMR Managed Scaling or by the customer (via Amazon EMR Console, Amazon EMR CLI modify-instance-fleet or Amazon EMR SDK ModifyInstanceFleet API) or by Amazon EMR due to Amazon EC2 Spot Reclamation." + }, "AWS::EMR::InstanceFleetConfig VolumeSpecification": { "Iops": "The number of I/O operations per second (IOPS) that the volume supports.", "SizeInGB": "The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. 
If the volume type is EBS-optimized, the minimum value is 10.", @@ -14221,6 +15081,7 @@ "AutoStopConfiguration": "The configuration for an application to automatically stop after a certain amount of time being idle.", "ImageConfiguration": "The image configuration applied to all worker types.", "InitialCapacity": "The initial capacity of the application.", + "InteractiveConfiguration": "The interactive configuration object that enables the interactive use cases for an application.", "MaximumCapacity": "The maximum capacity of the application. This is cumulative across all workers at any given point in time during the lifespan of the application is created. No new resources will be created once any one of the defined limits is hit.", "MonitoringConfiguration": "A configuration specification to be used when provisioning an application. A configuration consists of a classification, properties, and optional nested configurations. A classification refers to an application-specific configuration file. Properties are the settings you want to change in that file.", "Name": "The name of the application.", @@ -14261,6 +15122,10 @@ "Key": "", "Value": "" }, + "AWS::EMRServerless::Application InteractiveConfiguration": { + "LivyEndpointEnabled": "Enables an Apache Livy endpoint that you can connect to and run interactive jobs.", + "StudioEnabled": "Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook." + }, "AWS::EMRServerless::Application LogTypeMapKeyValuePair": { "Key": "", "Value": "" @@ -14302,25 +15167,25 @@ }, "AWS::ElastiCache::CacheCluster": { "AZMode": "Specifies whether the nodes in this Memcached cluster are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region.\n\nThis parameter is only supported for Memcached clusters.\n\nIf the `AZMode` and `PreferredAvailabilityZones` are not specified, ElastiCache assumes `single-az` mode.", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. 
If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "CacheParameterGroupName": "The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. 
You cannot use any parameter group which has `cluster-enabled='yes'` when creating a cluster.", "CacheSecurityGroupNames": "A list of security group names to associate with this cluster.\n\nUse this parameter only when you are creating a cluster outside of an Amazon Virtual Private Cloud (Amazon VPC).", "CacheSubnetGroupName": "The name of the subnet group to be used for the cluster.\n\nUse this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC).\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see `[AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .`", "ClusterName": "A name for the cache cluster. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the cache cluster. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nThe name must contain 1 to 50 alphanumeric characters or hyphens. The name must start with a letter and cannot end with a hyphen or contain two consecutive hyphens.", "Engine": "The name of the cache engine to be used for this cluster.\n\nValid values for this parameter are: `memcached` | `redis`", "EngineVersion": "The version number of the cache engine to be used for this cluster. To view the supported cache engine versions, use the DescribeCacheEngineVersions operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", - "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . 
IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheNodes": "The number of cache nodes that the cache cluster should have.\n\n> However, if the `PreferredAvailabilityZone` and `PreferredAvailabilityZones` properties were not previously specified and you don't specify any new values, an update requires [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "Port": "The port number on which each of the cache nodes accepts connections.", "PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.\n\nAll nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use `PreferredAvailabilityZones` .\n\nDefault: System chosen Availability Zone.", "PreferredAvailabilityZones": "A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.\n\nThis option is only supported on Memcached.\n\n> If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheNodes` . \n\nIf you want all the nodes in the same Availability Zone, use `PreferredAvailabilityZone` instead, or repeat the Availability Zone multiple times in the list.\n\nDefault: System chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", - "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", - "SnapshotName": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . 
\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`",
+ "SnapshotName": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .",
"SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot taken today is retained for 5 days before being deleted.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nDefault: 0 (i.e., automatic backups are disabled for this cache cluster).",
"SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .",
"Tags": "A list of tags to be added to this resource.",
@@ -14348,10 +15213,10 @@
"Value": "The tag's value. May be null."
},
"AWS::ElastiCache::GlobalReplicationGroup": {
- "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.",
+ "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.",
"CacheNodeType": "The cache node type of the Global datastore",
"CacheParameterGroupName": "The name of the cache parameter group to use with the Global datastore. It must be compatible with the major engine version used by the Global datastore.",
- "EngineVersion": "The Elasticache Redis engine version.",
+ "EngineVersion": "The ElastiCache Redis OSS engine version.",
"GlobalNodeGroupCount": "The number of node groups that comprise the Global Datastore.",
"GlobalReplicationGroupDescription": "The optional description of the Global datastore",
"GlobalReplicationGroupIdSuffix": "The suffix name of a Global Datastore. The suffix guarantees uniqueness of the Global Datastore name across multiple regions.",
@@ -14369,7 +15234,7 @@
"ReshardingConfigurations": "A list of PreferredAvailabilityZones objects that specifies the configuration of a node group in the resharded cluster."
},
"AWS::ElastiCache::GlobalReplicationGroup ReshardingConfiguration": {
- "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.",
+ "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.",
"PreferredAvailabilityZones": "A list of preferred availability zones for the nodes in this cluster."
},
"AWS::ElastiCache::ParameterGroup": {
@@ -14383,28 +15248,28 @@
"Value": "The tag's value. May be null."
},
"AWS::ElastiCache::ReplicationGroup": {
- "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created.
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`", - "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", - "AutoMinorVersionUpgrade": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", - "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "AtRestEncryptionEnabled": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created. To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", + "AuthToken": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "AutoMinorVersionUpgrade": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. 
This parameter is disabled for previous versions.", + "AutomaticFailoverEnabled": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "CacheNodeType": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.12xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . \n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.12xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Amazon Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)", - "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. 
If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "CacheParameterGroupName": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "CacheSecurityGroupNames": "A list of cache security group names to associate with this replication group.", "CacheSubnetGroupName": "The name of the cache subnet group to be used for the replication group.\n\n> If you're going to launch your cluster in an Amazon VPC, you need to create a subnet group before you start creating a cluster. For more information, see [AWS::ElastiCache::SubnetGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticache-subnetgroup.html) .", - "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "ClusterMode": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "DataTieringEnabled": "Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see [Data tiering](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/data-tiering.html) .", "Engine": "The name of the cache engine to be used for the clusters in this replication group. The value must be set to `Redis` .", "EngineVersion": "The version number of the cache engine to be used for the clusters in this replication group. 
To view the supported cache engine versions, use the `DescribeCacheEngineVersions` operation.\n\n*Important:* You can upgrade to a newer engine version (see [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement) ) in the *ElastiCache User Guide* , but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version.", "GlobalReplicationGroupId": "The name of the Global datastore", - "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "IpDiscovery": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "KmsKeyId": "The ID of the KMS key used to encrypt the disk on the cluster.", "LogDeliveryConfigurations": "Specifies the destination, format and type of the logs.", "MultiAZEnabled": "A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see [Minimizing Downtime: Multi-AZ](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) .", - "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", - "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "NetworkType": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "NodeGroupConfiguration": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "NotificationTopicArn": "The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent.\n\n> The Amazon SNS topic owner must be the same as the cluster owner.", "NumCacheClusters": "The number of clusters this replication group initially has.\n\nThis parameter is not used if there is more than one node group (shard). You should use `ReplicasPerNodeGroup` instead.\n\nIf `AutomaticFailoverEnabled` is `true` , the value of this parameter must be at least 2. If `AutomaticFailoverEnabled` is `false` you can omit this parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6.\n\nThe maximum permitted value for `NumCacheClusters` is 6 (1 primary plus 5 replicas).", - "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "NumNodeGroups": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "Port": "The port number on which each member of the replication group accepts connections.", "PreferredCacheClusterAZs": "A list of EC2 Availability Zones in which the replication group's clusters are created. The order of the Availability Zones in the list is the order in which clusters are allocated. The primary cluster is created in the first AZ in the list.\n\nThis parameter is not used if there is more than one node group (shard). You should use `NodeGroupConfiguration` instead.\n\n> If you are creating your replication group in an Amazon VPC (recommended), you can only locate clusters in Availability Zones associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheClusters` . \n\nDefault: system chosen Availability Zones.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). 
The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", @@ -14413,14 +15278,14 @@ "ReplicationGroupDescription": "A user-created description for the replication group.", "ReplicationGroupId": "The replication group identifier. This parameter is stored as a lowercase string.\n\nConstraints:\n\n- A name must contain from 1 to 40 alphanumeric characters or hyphens.\n- The first character must be a letter.\n- A name cannot end with a hyphen or contain two consecutive hyphens.", "SecurityGroupIds": "One or more Amazon VPC security groups associated with this replication group.\n\nUse this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC).", - "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "SnapshotArns": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "SnapshotName": "The name of a snapshot from which to restore data into the new replication group. The snapshot status changes to `restoring` while the new replication group is being created.", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot that was taken today is retained for 5 days before being deleted.\n\nDefault: 0 (i.e., automatic backups are disabled for this cluster).", "SnapshotWindow": "The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of your node group (shard).\n\nExample: `05:00-09:00`\n\nIf you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.", - "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "SnapshottingClusterId": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "Tags": "A list of tags to be added to this resource. Tags are comma-separated key,value pairs (e.g. Key= `myKey` , Value= `myKeyValue` . You can include multiple tags as shown following: Key= `myKey` , Value= `myKeyValue` Key= `mySecondKey` , Value= `mySecondKeyValue` . 
Tags on replication groups will be replicated to all nodes.", - "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", - "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "TransitEncryptionEnabled": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "TransitEncryptionMode": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "UserGroupIds": "The ID of user group to associate with the replication group." 
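For reviewers of the Redis → Redis OSS doc-string rename above, a minimal, hypothetical CloudFormation sketch of the `TransitEncryptionMode` migration flow that these descriptions document. The resource type and property names are taken from the doc strings in this hunk; the engine version, node sizing, and the `MySubnetGroup` reference are placeholders, not part of this change:

```yaml
Resources:
  CacheReplicationGroup:
    Type: AWS::ElastiCache::ReplicationGroup
    Properties:
      ReplicationGroupDescription: Redis OSS replication group with in-transit encryption
      Engine: redis
      EngineVersion: "7.1"            # placeholder version
      CacheNodeType: cache.t4g.medium
      NumCacheClusters: 2             # must be >= 2 when AutomaticFailoverEnabled is true
      AutomaticFailoverEnabled: true
      CacheSubnetGroupName: !Ref MySubnetGroup   # hypothetical subnet group
      TransitEncryptionEnabled: true
      # Start in "preferred" so existing clients can still connect unencrypted,
      # then flip to "required" in a follow-up stack update once all clients
      # are migrated, per the two-step process described above.
      TransitEncryptionMode: preferred
```

Per the doc string, moving straight to `required` is not allowed; the `preferred` step is mandatory and does not trigger replacement of the replication group.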
},
"AWS::ElastiCache::ReplicationGroup CloudWatchLogsDestinationDetails": {
@@ -14440,7 +15305,7 @@
"LogType": "Valid value is either `slow-log` , which refers to [slow-log](https://docs.aws.amazon.com/https://redis.io/commands/slowlog) or `engine-log` ."
},
"AWS::ElastiCache::ReplicationGroup NodeGroupConfiguration": {
- "NodeGroupId": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.",
+ "NodeGroupId": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.",
"PrimaryAvailabilityZone": "The Availability Zone where the primary node of this node group (shard) is launched.",
"ReplicaAvailabilityZones": "A list of Availability Zones to be used for the read replicas. The number of Availability Zones in this list must match the value of `ReplicaCount` or `ReplicasPerNodeGroup` if not specified.",
"ReplicaCount": "The number of read replica nodes in this node group (shard).",
@@ -14465,7 +15330,7 @@
},
"AWS::ElastiCache::ServerlessCache": {
"CacheUsageLimits": "The cache usage limit for the serverless cache.",
- "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.",
+ "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.",
"Description": "A description of the serverless cache.",
"Endpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.",
"Engine": "The engine the serverless cache is compatible with.",
@@ -14476,10 +15341,10 @@
"SecurityGroupIds": "The IDs of the EC2 security groups associated with the serverless cache.",
"ServerlessCacheName": "The unique identifier of the serverless cache.",
"SnapshotArnsToRestore": "The ARN of the snapshot from which to restore data into the new cache.",
- "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.",
+ "SnapshotRetentionLimit": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.",
"SubnetIds": "If no subnet IDs are given and your VPC is in us-west-1, then ElastiCache will select 2 default subnets across AZs in your VPC. For all other Regions, if no subnet IDs are given then ElastiCache will select 3 default subnets across AZs in your default VPC.",
"Tags": "A list of tags to be added to this resource.",
- "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL."
+ "UserGroupId": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL."
},
"AWS::ElastiCache::ServerlessCache CacheUsageLimits": {
"DataStorage": "The maximum data storage limit in the cache, expressed in Gigabytes.",
@@ -14689,6 +15554,7 @@
"AlpnPolicy": "[TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) policy.",
"Certificates": "The default SSL server certificate for a secure listener. You must provide exactly one certificate if the listener protocol is HTTPS or TLS.\n\nTo create a certificate list for a secure listener, use [AWS::ElasticLoadBalancingV2::ListenerCertificate](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listenercertificate.html) .",
"DefaultActions": "The actions for the default rule. You cannot define a condition for a default rule.\n\nTo create additional rules for an Application Load Balancer, use [AWS::ElasticLoadBalancingV2::ListenerRule](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-listenerrule.html) .",
+ "ListenerAttributes": "The listener attributes.",
"LoadBalancerArn": "The Amazon Resource Name (ARN) of the load balancer.",
"MutualAuthentication": "The mutual authentication configuration information.",
"Port": "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.",
@@ -14741,6 +15607,10 @@
"TargetGroupStickinessConfig": "Information about the target group stickiness for a rule.",
"TargetGroups": "Information about how traffic will be distributed between multiple target groups in a forward rule."
},
+ "AWS::ElasticLoadBalancingV2::Listener ListenerAttribute": {
+ "Key": "The name of the attribute.\n\nThe following attribute is supported by Network Load Balancers, and Gateway Load Balancers.\n\n- `tcp.idle_timeout.seconds` - The tcp idle timeout value, in seconds. The valid range is 60-6000 seconds. The default is 350 seconds.",
+ "Value": "The value of the attribute."
+ },
"AWS::ElasticLoadBalancingV2::Listener MutualAuthentication": {
"IgnoreClientCertificateExpiry": "Indicates whether expired client certificates are ignored.",
"Mode": "The client certificate handling method. Options are `off` , `passthrough` or `verify` . The default value is `off` .",
@@ -14908,7 +15778,7 @@
"Protocol": "The protocol to use for routing traffic to the targets. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, or TCP_UDP. For Gateway Load Balancers, the supported protocol is GENEVE. A TCP_UDP listener must be associated with a TCP_UDP target group. If the target is a Lambda function, this parameter does not apply.",
"ProtocolVersion": "[HTTP/HTTPS protocol] The protocol version. The possible values are `GRPC` , `HTTP1` , and `HTTP2` .",
"Tags": "The tags.",
- "TargetGroupAttributes": "The attributes.",
+ "TargetGroupAttributes": "The target group attributes.",
"TargetType": "The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.\n\n- `instance` - Register targets by instance ID. This is the default value.\n- `ip` - Register targets by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.\n- `lambda` - Register a single Lambda function as a target.\n- `alb` - Register a single Application Load Balancer as a target.",
"Targets": "The targets.",
"UnhealthyThresholdCount": "The number of consecutive health check failures required before considering a target unhealthy. The range is 2-10. If the target group protocol is TCP, TCP_UDP, UDP, TLS, HTTP or HTTPS, the default is 2.
For target groups with a protocol of GENEVE, the default is 2. If the target type is `lambda` , the default is 5.", @@ -15048,7 +15918,7 @@ }, "AWS::EntityResolution::IdMappingWorkflow": { "Description": "A description of the workflow.", - "IdMappingTechniques": "An object which defines the `idMappingType` and the `providerProperties` .", + "IdMappingTechniques": "An object which defines the ID mapping technique and any additional configurations.", "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "OutputSourceConfig": "A list of `IdMappingWorkflowOutputSource` objects, each of which contains fields `OutputS3Path` and `Output` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to create resources on your behalf as part of workflow execution.", @@ -15056,21 +15926,20 @@ "WorkflowName": "The name of the workflow. There can't be multiple `IdMappingWorkflows` with the same name." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModel": "", - "RuleDefinitionType": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModel": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source can be matched to the same record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , multiple records in the source can be matched to one record in the target.", + "RuleDefinitionType": "The set of rules you can use in an ID mapping workflow. The limitations specified for the source or target to define the match rules must be compatible.", + "Rules": "The rules that can be used for ID mapping." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingTechniques": { "IdMappingType": "The type of ID mapping.", - "NormalizationVersion": "", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaArn": "The ARN (Amazon Resource Name) that AWS Entity Resolution generated for the `SchemaMapping` .", - "Type": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdMappingWorkflow IdMappingWorkflowOutputSource": { "KMSArn": "Customer AWS KMS ARN for encryption at rest. If not provided, system will use an AWS Entity Resolution managed KMS key.", @@ -15085,8 +15954,8 @@ "ProviderServiceArn": "The ARN of the provider service." }, "AWS::EntityResolution::IdMappingWorkflow Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdMappingWorkflow Tag": { "Key": "The key of the tag.", @@ -15099,15 +15968,15 @@ "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role. AWS Entity Resolution assumes this role to access the resources defined in this `IdNamespace` on your behalf as part of the workflow run.", "Tags": "The tags used to organize, track, or control access for this resource.", - "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to." + "Type": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to." }, "AWS::EntityResolution::IdNamespace IdNamespaceIdMappingWorkflowProperties": { "IdMappingType": "The type of ID mapping.", "ProviderProperties": "An object which defines any additional configurations required by the provider service.", - "RuleBasedProperties": "" + "RuleBasedProperties": "An object which defines any additional configurations required by rule-based matching." }, "AWS::EntityResolution::IdNamespace IdNamespaceInputSource": { - "InputSourceARN": "An AWS Glue table ARN for the input source table.", + "InputSourceARN": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "SchemaName": "The name of the schema." }, "AWS::EntityResolution::IdNamespace NamespaceProviderProperties": { @@ -15115,14 +15984,14 @@ "ProviderServiceArn": "The Amazon Resource Name (ARN) of the provider service." }, "AWS::EntityResolution::IdNamespace NamespaceRuleBasedProperties": { - "AttributeMatchingModel": "", - "RecordMatchingModels": "", - "RuleDefinitionTypes": "", - "Rules": "" + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. 
For example, if the value of the `Email` field of Profile A matches the value of `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "RecordMatchingModels": "The type of matching record that is allowed to be used in an ID mapping workflow.\n\nIf the value is set to `ONE_SOURCE_TO_ONE_TARGET` , only one record in the source is matched to one record in the target.\n\nIf the value is set to `MANY_SOURCE_TO_ONE_TARGET` , all matching records in the source are matched to one record in the target.", + "RuleDefinitionTypes": "The sets of rules you can use in an ID mapping workflow. The limitations specified for the source and target must be compatible.", + "Rules": "The rules for the ID namespace." }, "AWS::EntityResolution::IdNamespace Rule": { - "MatchingKeys": "", - "RuleName": "" + "MatchingKeys": "A list of `MatchingKeys` . The `MatchingKeys` must have been defined in the `SchemaMapping` . Two records are considered to match according to this rule if all of the `MatchingKeys` match.", + "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::IdNamespace Tag": { "Key": "The key of the tag.", @@ -15130,6 +15999,7 @@ }, "AWS::EntityResolution::MatchingWorkflow": { "Description": "A description of the workflow.", + "IncrementalRunConfig": "An object which defines an incremental run type and has only `incrementalRunType` as a field.", "InputSourceConfig": "A list of `InputSource` objects, which have the fields `InputSourceARN` and `SchemaName` .", "OutputSourceConfig": "A list of `OutputSource` objects, each of which contains fields `OutputS3Path` , `ApplyNormalization` , and `Output` .", "ResolutionTechniques": "An object which defines the `resolutionType` and the `ruleBasedProperties` .", @@ -15137,6 +16007,9 @@ "Tags": "The tags used to organize, track, or control access for this resource.", "WorkflowName": "The name of the workflow. There can't be multiple `MatchingWorkflows` with the same name." }, + "AWS::EntityResolution::MatchingWorkflow IncrementalRunConfig": { + "IncrementalRunType": "The type of incremental run. It takes only one value: `IMMEDIATE` ." + }, "AWS::EntityResolution::MatchingWorkflow InputSource": { "ApplyNormalization": "Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an `AttributeType` of `PHONE_NUMBER` , and the data in the input table is in a format of 1234567890, AWS Entity Resolution will normalize this field in the output to (123)-456-7890.", "InputSourceARN": "An object containing `InputSourceARN` , `SchemaName` , and `ApplyNormalization` .", @@ -15170,8 +16043,8 @@ "RuleName": "A name for the matching rule." }, "AWS::EntityResolution::MatchingWorkflow RuleBasedProperties": { - "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. 
When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", - "MatchPurpose": "", + "AttributeMatchingModel": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", + "MatchPurpose": "An indicator of whether to generate IDs and index the data or not.\n\nIf you choose `IDENTIFIER_GENERATION` , the process generates IDs and indexes the data.\n\nIf you choose `INDEXING` , the process indexes the data without generating IDs.", "Rules": "A list of `Rule` objects, each of which have fields `RuleName` and `MatchingKeys` ." }, "AWS::EntityResolution::MatchingWorkflow Tag": { @@ -15195,8 +16068,8 @@ "AWS::EntityResolution::SchemaMapping SchemaInputAttribute": { "FieldName": "A string containing the field name.", "GroupName": "A string that instructs AWS Entity Resolution to combine several columns into a unified column with the identical attribute type.\n\nFor example, when working with columns such as `first_name` , `middle_name` , and `last_name` , assigning them a common `groupName` will prompt AWS Entity Resolution to concatenate them into a single value.", - "Hashed": "", - "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "Hashed": "Indicates if the column values are hashed in the schema input. If the value is set to `TRUE` , the column values are hashed. If the value is set to `FALSE` , the column values are cleartext.", + "MatchKey": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "SubType": "The subtype of the attribute, selected from a list of values.", "Type": "The type of the attribute, selected from a list of values." 
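To make the newly filled-in EntityResolution doc strings above concrete, a hypothetical CloudFormation sketch of a rule-based matching workflow using the new `MatchPurpose` field. The property names (`ResolutionTechniques`, `RuleBasedProperties`, `Rules`, `MatchingKeys`) come from the descriptions in this hunk; the ARNs, S3 path, schema name, and output attribute are placeholders, and the exact shape of `Output` entries is assumed:

```yaml
Resources:
  CustomerMatchingWorkflow:
    Type: AWS::EntityResolution::MatchingWorkflow
    Properties:
      WorkflowName: customer-dedup                      # placeholder name
      RoleArn: arn:aws:iam::111122223333:role/ERRole    # placeholder role
      InputSourceConfig:
        - InputSourceARN: arn:aws:glue:us-east-1:111122223333:table/db/customers  # placeholder
          SchemaName: CustomerSchema                    # placeholder schema mapping
          ApplyNormalization: true
      OutputSourceConfig:
        - OutputS3Path: s3://example-bucket/matching-output/   # placeholder
          Output:
            - Name: email                               # assumed output-attribute shape
      ResolutionTechniques:
        ResolutionType: RULE_MATCHING
        RuleBasedProperties:
          AttributeMatchingModel: ONE_TO_ONE
          MatchPurpose: IDENTIFIER_GENERATION   # new field: generate IDs and index the data
          Rules:
            - RuleName: MatchOnEmail
              MatchingKeys:
                - email   # must be defined as a matchKey in the SchemaMapping
```

Per the rule description above, two records match under `MatchOnEmail` only if all of the rule's `MatchingKeys` match.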
}, @@ -15774,7 +16647,7 @@ }, "AWS::FSx::DataRepositoryAssociation": { "BatchImportMetaDataOnCreate": "A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to `true` .", - "DataRepositoryPath": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "DataRepositoryPath": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "FileSystemId": "The ID of the file system on which the data repository association is configured.", "FileSystemPath": "A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as `/ns1/` ) or subdirectory (such as `/ns1/subdir/` ) that will be mapped 1-1 with `DataRepositoryPath` . The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path `/ns1/` , then you cannot link another data repository with file system path `/ns1/ns2` .\n\nThis path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.\n\n> If you specify only a forward slash ( `/` ) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.", "ImportedFileChunkSize": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.\n\nThe default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.", @@ -16363,7 +17236,7 @@ "FleetType": "Indicates whether to use On-Demand or Spot instances for this fleet. By default, this property is set to `ON_DEMAND` . Learn more about when to use [On-Demand versus Spot Instances](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-ec2-instances.html#gamelift-ec2-instances-spot) . This fleet property can't be changed after the fleet is created.", "InstanceRoleARN": "A unique identifier for an IAM role with access permissions to other AWS services. Any application that runs on an instance in the fleet--including install scripts, server processes, and other processes--can use these permissions to interact with AWS resources that you own or have access to. For more information about using the role with your game server builds, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . 
This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", "InstanceRoleCredentialsProvider": "Indicates that fleet instances maintain a shared credentials file for the IAM role defined in `InstanceRoleArn` . Shared credentials allow applications that are deployed with the game server executable to communicate with other AWS resources. This property is used only when the game server is integrated with the server SDK version 5.x. For more information about using shared credentials, see [Communicate with other AWS resources from your fleets](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-server-resources.html) . This attribute is used with fleets where `ComputeType` is \"EC2\" or \"Container\".", - "Locations": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "Locations": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "MaxSize": "The maximum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 1.", "MetricGroups": "The name of an AWS CloudWatch metric group to add this fleet to. A metric group is used to aggregate the metrics for multiple fleets. You can specify an existing metric group name or set a new name to create a new metric group. A fleet can be included in only one metric group at a time.", "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0.", @@ -16407,7 +17280,7 @@ "MinSize": "The minimum number of instances that are allowed in the specified fleet location. If this parameter is not set, the default is 0." }, "AWS::GameLift::Fleet LocationConfiguration": { - "Location": "An AWS Region code, such as `us-west-2` .", + "Location": "An AWS Region code, such as `us-west-2` . For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "LocationCapacity": "Current resource capacity settings for managed EC2 fleets and container fleets. 
For multi-location fleets, location values might refer to a fleet's remote location or its home Region.\n\n*Returned by:* [DescribeFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetCapacity.html) , [DescribeFleetLocationCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_DescribeFleetLocationCapacity.html) , [UpdateFleetCapacity](https://docs.aws.amazon.com/gamelift/latest/apireference/API_UpdateFleetCapacity.html)" }, "AWS::GameLift::Fleet ResourceCreationLimitPolicy": { @@ -16506,7 +17379,7 @@ }, "AWS::GameLift::Location": { "LocationName": "A descriptive name for the custom location.", - "Tags": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* ." + "Tags": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* ." }, "AWS::GameLift::Location Tag": { "Key": "The key for a developer-defined key value pair for tagging an AWS resource.", @@ -16582,6 +17455,7 @@ "Tags": "Add tags for a cross-account attachment.\n\nFor more information, see [Tagging in AWS Global Accelerator](https://docs.aws.amazon.com/global-accelerator/latest/dg/tagging-in-global-accelerator.html) in the *AWS Global Accelerator Developer Guide* ." }, "AWS::GlobalAccelerator::CrossAccountAttachment Resource": { + "Cidr": "An IP address range, in CIDR format, that is specified as a resource. The address must be provisioned and advertised in AWS Global Accelerator by following the bring your own IP address (BYOIP) process for Global Accelerator.\n\nFor more information, see [Bring your own IP addresses (BYOIP)](https://docs.aws.amazon.com/global-accelerator/latest/dg/using-byoip.html) in the AWS Global Accelerator Developer Guide.", "EndpointId": "The endpoint ID for the endpoint that is specified as a AWS resource.\n\nAn endpoint ID for the cross-account feature is the ARN of an AWS resource, such as a Network Load Balancer, that Global Accelerator supports as an endpoint for an accelerator.", "Region": "The AWS Region where a shared endpoint resource is located." }, @@ -16659,7 +17533,7 @@ }, "AWS::Glue::Connection ConnectionInput": { "ConnectionProperties": "These key-value pairs define parameters for the connection.", - "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` .
These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "ConnectionType": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . 
These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "Description": "The description of the connection.", "MatchCriteria": "A list of criteria that can be used in selecting this connection.", "Name": "The name of the connection.", @@ -16786,7 +17660,8 @@ }, "AWS::Glue::Database": { "CatalogId": "The AWS account ID for the account in which to create the catalog object.\n\n> To specify the account ID, you can use the `Ref` intrinsic function with the `AWS::AccountId` pseudo parameter.
For example: `!Ref AWS::AccountId`", - "DatabaseInput": "The metadata for the database." + "DatabaseInput": "The metadata for the database.", + "DatabaseName": "The name of the catalog database." }, "AWS::Glue::Database DataLakePrincipal": { "DataLakePrincipalIdentifier": "An identifier for the AWS Lake Formation principal." @@ -16839,6 +17714,8 @@ "ExecutionClass": "Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.\n\nOnly jobs with AWS Glue version 3.0 and above and command type `glueetl` will be allowed to set `ExecutionClass` to `FLEX` . The flexible execution class is available for Spark jobs.", "ExecutionProperty": "The maximum number of concurrent runs that are allowed for this job.", "GlueVersion": "Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark.\n\nFor more information about the available AWS Glue versions and corresponding Spark and Python versions, see [Glue version](https://docs.aws.amazon.com/glue/latest/dg/add-job.html) in the developer guide.\n\nJobs that are created without specifying a Glue version default to the latest Glue version available.", + "JobMode": "A mode that describes how a job was created. Valid values are:\n\n- `SCRIPT` - The job was created using the AWS Glue Studio script editor.\n- `VISUAL` - The job was created using the AWS Glue Studio visual editor.\n- `NOTEBOOK` - The job was created using an interactive sessions notebook.\n\nWhen the `JobMode` field is missing or null, `SCRIPT` is assigned as the default value.", + "JobRunQueuingEnabled": "Specifies whether job run queuing is enabled for the job runs for this job.\n\nA value of true means job run queuing is enabled for the job runs. If false or not populated, the job runs will not be considered for queueing.\n\nIf this field does not match the value set in the job run, then the value from the job run field will be used.", "LogUri": "This field is reserved for future use.", "MaintenanceWindow": "This field specifies a day of the week and hour for a maintenance window for streaming jobs. AWS Glue periodically performs maintenance activities. During these maintenance windows, AWS Glue will need to restart your streaming jobs.\n\nAWS Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.", "MaxCapacity": "The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n\nDo not set `Max Capacity` if using `WorkerType` and `NumberOfWorkers` .\n\nThe value that can be allocated for `MaxCapacity` depends on whether you are running a Python shell job or an Apache Spark ETL job:\n\n- When you specify a Python shell job ( `JobCommand.Name` =\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.\n- When you specify an Apache Spark ETL job ( `JobCommand.Name` =\"glueetl\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. 
This job type cannot have a fractional DPU allocation.", @@ -16908,7 +17785,7 @@ }, "AWS::Glue::MLTransform TransformParameters": { "FindMatchesParameters": "The parameters for the find matches algorithm.", - "TransformType": "The type of machine learning transform. `FIND_MATCHES` is the only option.\n\nFor information about the types of machine learning transforms, see [Creating Machine Learning Transforms](https://docs.aws.amazon.com/glue/latest/dg/add-job-machine-learning-transform.html) ." + "TransformType": "The type of machine learning transform. `FIND_MATCHES` is the only option.\n\nFor information about the types of machine learning transforms, see [Working with machine learning transforms](https://docs.aws.amazon.com/glue/latest/dg/console-machine-learning-transforms.html) ." }, "AWS::Glue::Partition": { "CatalogId": "The AWS account ID of the catalog in which the partion is to be created.\n\n> To specify the account ID, you can use the `Ref` intrinsic function with the `AWS::AccountId` pseudo parameter. For example: `!Ref AWS::AccountId`", @@ -17156,6 +18033,26 @@ "Conditions": "A list of the conditions that determine when the trigger will fire.", "Logical": "An optional field if only one condition is listed. If multiple conditions are listed, then this field is required." }, + "AWS::Glue::UsageProfile": { + "Configuration": "", + "Description": "A description of the usage profile.", + "Name": "The name of the usage profile.", + "Tags": "" + }, + "AWS::Glue::UsageProfile ConfigurationObject": { + "AllowedValues": "A list of allowed values for the parameter.", + "DefaultValue": "A default value for the parameter.", + "MaxValue": "A maximum allowed value for the parameter.", + "MinValue": "A minimum allowed value for the parameter." + }, + "AWS::Glue::UsageProfile ProfileConfiguration": { + "JobConfiguration": "A key-value map of configuration parameters for AWS Glue jobs.", + "SessionConfiguration": "A key-value map of configuration parameters for AWS Glue sessions." + }, + "AWS::Glue::UsageProfile Tag": { + "Key": "The tag key. The key is required when you create a tag on an object. The key is case-sensitive, and must not contain the prefix aws.", + "Value": "The tag value. The value is optional when you create a tag on an object. The value is case-sensitive, and must not contain the prefix aws." + }, "AWS::Glue::Workflow": { "DefaultRunProperties": "A collection of properties to be used as part of each execution of the workflow", "Description": "A description of the workflow", @@ -17728,8 +18625,8 @@ "Polarization": "The polarization of the spectrum. Valid values are `\"RIGHT_HAND\"` and `\"LEFT_HAND\"` . Capturing both `\"RIGHT_HAND\"` and `\"LEFT_HAND\"` polarization requires two separate configs." }, "AWS::GroundStation::Config Tag": { - "Key": "", - "Value": "" + "Key": "Name of the object key.", + "Value": "Value of the tag." }, "AWS::GroundStation::Config TrackingConfig": { "Autotrack": "Specifies whether or not to use autotrack. `REMOVED` specifies that program track should only be used during the contact. `PREFERRED` specifies that autotracking is preferred during the contact but fallback to program track if the signal is lost. `REQUIRED` specifies that autotracking is required during the contact and not to use program track if the signal is lost." @@ -17749,11 +18646,11 @@ "Tags": "Tags assigned to a resource." 
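Since `AWS::Glue::UsageProfile` is new in the entries above, a sketch of how `ProfileConfiguration` nests may help; the profile name and the parameter keys (`numberOfWorkers`, `timeout`) are assumptions for illustration, not taken from the schema source:

```yaml
TeamUsageProfile:
  Type: AWS::Glue::UsageProfile
  Properties:
    Name: team-standard
    Description: Caps worker counts and timeouts for team jobs
    Configuration:
      JobConfiguration:
        numberOfWorkers:      # map key is the job parameter name (assumed)
          DefaultValue: "10"
          MinValue: "2"
          MaxValue: "50"
      SessionConfiguration:
        timeout:
          DefaultValue: "30"
```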
}, "AWS::GroundStation::DataflowEndpointGroup AwsGroundStationAgentEndpoint": { - "AgentStatus": "", - "AuditResults": "", - "EgressAddress": "", - "IngressAddress": "", - "Name": "" + "AgentStatus": "The status of AgentEndpoint.", + "AuditResults": "The results of the audit.", + "EgressAddress": "The egress address of AgentEndpoint.", + "IngressAddress": "The ingress address of AgentEndpoint.", + "Name": "Name string associated with AgentEndpoint. Used as a human-readable identifier for AgentEndpoint." }, "AWS::GroundStation::DataflowEndpointGroup ConnectionDetails": { "Mtu": "Maximum transmission unit (MTU) size in bytes of a dataflow endpoint.", @@ -17791,8 +18688,8 @@ "Port": "The port of the endpoint, such as `55888` ." }, "AWS::GroundStation::DataflowEndpointGroup Tag": { - "Key": "", - "Value": "" + "Key": "Name of the object key.", + "Value": "Value of the tag." }, "AWS::GroundStation::MissionProfile": { "ContactPostPassDurationSeconds": "Amount of time in seconds after a contact ends that you\u2019d like to receive a Ground Station Contact State Change indicating the pass has finished.", @@ -17810,12 +18707,12 @@ "Source": "The ARN of the source for this dataflow edge. For example, specify the ARN of an antenna downlink config for a downlink edge or a dataflow endpoint config for an uplink edge." }, "AWS::GroundStation::MissionProfile StreamsKmsKey": { - "KmsAliasArn": "", - "KmsKeyArn": "" + "KmsAliasArn": "KMS Alias Arn.", + "KmsKeyArn": "KMS Key Arn." }, "AWS::GroundStation::MissionProfile Tag": { - "Key": "", - "Value": "" + "Key": "Name of the object key.", + "Value": "Value of the tag." }, "AWS::GuardDuty::Detector": { "DataSources": "Describes which data sources will be enabled for the detector.", @@ -17860,7 +18757,7 @@ "AWS::GuardDuty::Filter": { "Action": "Specifies the action that is to be applied to the findings that match the filter.", "Description": "The description of the filter. Valid characters include alphanumeric characters, and special characters such as hyphen, period, colon, underscore, parentheses ( `{ }` , `[ ]` , and `( )` ), forward slash, horizontal tab, vertical tab, newline, form feed, return, and whitespace.", - "DetectorId": "The ID of the detector belonging to the GuardDuty account that you want to create a filter for.", + "DetectorId": "The detector ID associated with the GuardDuty account for which you want to create a filter.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "FindingCriteria": "Represents the criteria to be used in the filter for querying findings.", "Name": "The name of the filter. Valid characters include period (.), underscore (_), dash (-), and alphanumeric characters. A whitespace is considered to be an invalid character.", "Rank": "Specifies the position of the filter in the list of current filters. Also specifies the order in which this filter is applied to the findings. The minimum value for this property is 1 and the maximum is 100.\n\nBy default, filters may not be created in the same order as they are ranked. 
To ensure that the filters are created in the expected order, you can use an optional attribute, [DependsOn](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html) , with the following syntax: `\"DependsOn\":[ \"ObjectName\" ]` .", @@ -17889,7 +18786,7 @@ }, "AWS::GuardDuty::IPSet": { "Activate": "Indicates whether or not GuardDuty uses the `IPSet` .", - "DetectorId": "The unique ID of the detector of the GuardDuty account that you want to create an IPSet for.", + "DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.\n\nTo find the `detectorId` in the current Region, see the Settings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "Format": "The format of the file that contains the IPSet.", "Location": "The URI of the file that contains the IPSet.", "Name": "The user-friendly name to identify the IPSet.\n\nAllowed characters are alphanumeric, whitespace, dash (-), and underscores (_).", @@ -17916,7 +18813,7 @@ "Message": "Issue message that specifies the reason. For information about potential troubleshooting steps, see [Troubleshooting Malware Protection for S3 status issues](https://docs.aws.amazon.com/guardduty/latest/ug/troubleshoot-s3-malware-protection-status-errors.html) in the *GuardDuty User Guide* ." }, "AWS::GuardDuty::MalwareProtectionPlan CFNTagging": { - "Status": "Indicates whether or not you chose GuardDuty to add a predefined tag to the scanned S3 object." + "Status": "Indicates whether or not you chose GuardDuty to add a predefined tag to the scanned S3 object.\n\nPotential values include `ENABLED` and `DISABLED` . These values are case-sensitive." }, "AWS::GuardDuty::MalwareProtectionPlan S3Bucket": { "BucketName": "Name of the S3 bucket.", @@ -17927,7 +18824,7 @@ "Value": "The tag value." }, "AWS::GuardDuty::Master": { - "DetectorId": "The unique ID of the detector of the GuardDuty member account.", + "DetectorId": "The unique ID of the detector of the GuardDuty member account.\n\nTo find the `detectorId` in the current Region, see the Settings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "InvitationId": "The ID of the invitation that is sent to the account designated as a member account. You can find the invitation ID by running the [ListInvitations](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListInvitations.html) in the *GuardDuty API Reference* .", "MasterId": "The AWS account ID of the account designated as the GuardDuty administrator account."
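The `DetectorId` and `Rank` notes above combine naturally in one template: `Ref` on an `AWS::GuardDuty::Detector` returns the detector ID, and `DependsOn` forces filters to be created in rank order. A sketch with assumed logical IDs and criteria:

```yaml
Detector:
  Type: AWS::GuardDuty::Detector
  Properties:
    Enable: true

HighSeverityFilter:
  Type: AWS::GuardDuty::Filter
  Properties:
    DetectorId: !Ref Detector        # Ref returns the detector ID
    Name: high-severity
    Action: ARCHIVE
    Rank: 1
    FindingCriteria:
      Criterion:
        severity:
          Gte: 7

LowSeverityFilter:
  Type: AWS::GuardDuty::Filter
  DependsOn: HighSeverityFilter      # create in rank order, per the note above
  Properties:
    DetectorId: !Ref Detector
    Name: low-severity
    Action: NOOP
    Rank: 2
    FindingCriteria:
      Criterion:
        severity:
          Lt: 4
```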
}, @@ -17941,7 +18838,7 @@ }, "AWS::GuardDuty::ThreatIntelSet": { "Activate": "A Boolean value that indicates whether GuardDuty is to start using the uploaded ThreatIntelSet.", - "DetectorId": "The unique ID of the detector of the GuardDuty account that you want to create a threatIntelSet for.", + "DetectorId": "The unique ID of the detector of the GuardDuty account for which you want to create a `ThreatIntelSet` .\n\nTo find the `detectorId` in the current Region, see the Settings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "Format": "The format of the file that contains the ThreatIntelSet.", "Location": "The URI of the file that contains the ThreatIntelSet.", "Name": "A user-friendly ThreatIntelSet name displayed in all findings that are generated by activity that involves IP addresses included in this ThreatIntelSet.", @@ -18025,7 +18922,7 @@ "AWS::IAM::OIDCProvider": { "ClientIdList": "A list of client IDs (also known as audiences) that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", "Tags": "A list of tags that are attached to the specified IAM OIDC provider. The returned list of tags is sorted by tag key. For more information about tagging, see [Tagging IAM resources](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) in the *IAM User Guide* .", - "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "ThumbprintList": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "Url": "The URL that the IAM OIDC provider resource object is associated with. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) ." }, "AWS::IAM::OIDCProvider Tag": { @@ -18179,6 +19076,15 @@ "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." }, + "AWS::IVS::PublicKey": { + "Name": "Public key name. The value does not need to be unique.", + "PublicKeyMaterial": "The public portion of a customer-generated key pair.", + "Tags": "An array of key-value pairs to apply to this resource." + }, + "AWS::IVS::PublicKey Tag": { + "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)."
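The new `ThumbprintList` behavior above means a provider can now be declared without any thumbprints. A sketch, using GitHub Actions' well-known issuer URL purely as an example:

```yaml
GitHubOidcProvider:
  Type: AWS::IAM::OIDCProvider
  Properties:
    Url: https://token.actions.githubusercontent.com   # example issuer
    ClientIdList:
      - sts.amazonaws.com
    # ThumbprintList omitted: IAM retrieves the top intermediate CA
    # thumbprint of the provider's server certificate itself.
```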
+ }, "AWS::IVS::RecordingConfiguration": { "DestinationConfiguration": "A destination configuration describes an S3 bucket where recorded video will be stored. See the DestinationConfiguration property type for more information.", "Name": "Recording-configuration name. The value does not need to be unique.", @@ -18208,9 +19114,14 @@ "TargetIntervalSeconds": "The targeted thumbnail-generation interval in seconds. This is configurable (and required) only if [RecordingMode](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-recordingconfiguration-thumbnailconfiguration.html#cfn-ivs-recordingconfiguration-thumbnailconfiguration-recordingmode) is `INTERVAL` .\n\n> Setting a value for `TargetIntervalSeconds` does not guarantee that thumbnails are generated at the specified interval. For thumbnails to be generated at the `TargetIntervalSeconds` interval, the `IDR/Keyframe` value for the input video must be less than the `TargetIntervalSeconds` value. See [Amazon IVS Streaming Configuration](https://docs.aws.amazon.com/ivs/latest/LowLatencyUserGuide/streaming-config.html) for information on setting `IDR/Keyframe` to the recommended value in video-encoder settings. \n\n*Default* : 60" }, "AWS::IVS::Stage": { + "AutoParticipantRecordingConfiguration": "", "Name": "Stage name.", "Tags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ivs-stage-tag.html) ." }, + "AWS::IVS::Stage AutoParticipantRecordingConfiguration": { + "MediaTypes": "Types of media to be recorded. Default: `AUDIO_VIDEO` .", + "StorageConfigurationArn": "ARN of the StorageConfiguration resource to use for individual participant recording. Default: \"\" (empty string, no storage configuration is specified). Individual participant recording cannot be started unless a storage configuration is specified when a Stage is created or updated." + }, "AWS::IVS::Stage Tag": { "Key": "One part of a key-value pair that makes up a tag. A `key` is a general label that acts like a category for more specific tag values.", "Value": "The optional part of a key-value pair that makes up a tag. A `value` acts as a descriptor within a tag category (key)." @@ -18276,7 +19187,7 @@ }, "AWS::IdentityStore::Group": { "Description": "A string containing the description of the group.", - "DisplayName": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.", + "DisplayName": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.\n\nPrefix search supports a maximum of 1,000 characters for the string.", "IdentityStoreId": "The globally unique identifier for the identity store." }, "AWS::IdentityStore::GroupMembership": { @@ -18418,7 +19329,7 @@ "Workflows": "Contains an array of workflow configuration objects."
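Per the `AutoParticipantRecordingConfiguration` notes above, recording can't start without a storage configuration. A sketch wiring the two together; logical IDs and the bucket name are illustrative:

```yaml
RecordingStorage:
  Type: AWS::IVS::StorageConfiguration
  Properties:
    S3:
      BucketName: my-stage-recordings   # illustrative bucket name

DemoStage:
  Type: AWS::IVS::Stage
  Properties:
    Name: demo-stage
    AutoParticipantRecordingConfiguration:
      StorageConfigurationArn: !GetAtt RecordingStorage.Arn
      MediaTypes:
        - AUDIO_VIDEO                   # the documented default
```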
}, "AWS::ImageBuilder::Image EcrConfiguration": { - "ContainerTags": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "RepositoryName": "The name of the container repository that Amazon Inspector scans to identify findings for your container images. The name includes the path for the repository location. If you don\u2019t provide this information, Image Builder creates a repository in your account named `image-builder-image-scanning-repository` for vulnerability scans of your output container images." }, "AWS::ImageBuilder::Image ImageScanningConfiguration": { @@ -18456,7 +19367,7 @@ "Workflows": "Contains the workflows that run for the image pipeline." }, "AWS::ImageBuilder::ImagePipeline EcrConfiguration": { - "ContainerTags": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "RepositoryName": "The name of the container repository that Amazon Inspector scans to identify findings for your container images. The name includes the path for the repository location. If you don\u2019t provide this information, Image Builder creates a repository in your account named `image-builder-image-scanning-repository` for vulnerability scans of your output container images." }, "AWS::ImageBuilder::ImagePipeline ImageScanningConfiguration": { @@ -18909,7 +19820,10 @@ "Value": "The tag's value." }, "AWS::IoT::DomainConfiguration": { + "ApplicationProtocol": "An enumerated string that speci\ufb01es the application-layer protocol.\n\n> This property isn't available in China.", + "AuthenticationType": "An enumerated string that speci\ufb01es the authentication type.\n\n> This property isn't available in China.", "AuthorizerConfig": "An object that specifies the authorization service for a domain.", + "ClientCertificateConfig": "An object that speci\ufb01es the client certificate con\ufb01guration for a domain.\n\n> This property isn't available in China.", "DomainConfigurationName": "The name of the domain configuration. This value must be unique to a region.", "DomainConfigurationStatus": "The status to which the domain configuration should be updated.\n\nValid values: `ENABLED` | `DISABLED`", "DomainName": "The name of the domain.", @@ -18924,6 +19838,9 @@ "AllowAuthorizerOverride": "A Boolean that specifies whether the domain configuration's authorization service can be overridden.", "DefaultAuthorizerName": "The name of the authorization service for a domain configuration." }, + "AWS::IoT::DomainConfiguration ClientCertificateConfig": { + "ClientCertificateCallbackArn": "The ARN of the Lambda function that IoT invokes after mutual TLS authentication during the connection.\n\n> This property isn't available in China." + }, "AWS::IoT::DomainConfiguration ServerCertificateConfig": { "EnableOCSPCheck": "A Boolean value that indicates whether Online Certificate Status Protocol (OCSP) server certificate check is enabled or not. 
For more information, see [Configurable endpoints](https://docs.aws.amazon.com//iot/latest/developerguide/iot-custom-endpoints-configurable.html) from the AWS IoT Core Developer Guide." }, @@ -20076,6 +20993,7 @@ "Priority": "(Optional) A number indicating the priority of one campaign over another campaign for a certain vehicle or fleet. A campaign with the lowest value is deployed to vehicles before any other campaigns. If it's not specified, `0` is used.\n\nDefault: `0`", "SignalCatalogArn": "The Amazon Resource Name (ARN) of the signal catalog associated with the campaign.", "SignalsToCollect": "(Optional) A list of information about signals to collect.", + "SignalsToFetch": "", "SpoolingMode": "(Optional) Whether to store collected data after a vehicle lost a connection with the cloud. After a connection is re-established, the data is automatically forwarded to AWS IoT FleetWise . If you want to store collected data when a vehicle loses connection with the cloud, use `TO_DISK` . If it's not specified, `OFF` is used.\n\nDefault: `OFF`", "StartTime": "(Optional) The time, in milliseconds, to deliver a campaign after it was approved. If it's not specified, `0` is used.\n\nDefault: `0`", "Tags": "(Optional) Metadata that can be used to manage the campaign.", @@ -20091,16 +21009,35 @@ "MinimumTriggerIntervalMs": "(Optional) The minimum duration of time between two triggering events to collect data, in milliseconds.\n\n> If a signal changes often, you might want to collect data at a slower rate.", "TriggerMode": "(Optional) Whether to collect data for all triggering events ( `ALWAYS` ). Specify ( `RISING_EDGE` ), or specify only when the condition first evaluates to false. For example, triggering on \"AirbagDeployed\"; Users aren't interested on triggering when the airbag is already exploded; they only care about the change from not deployed => deployed." }, + "AWS::IoTFleetWise::Campaign ConditionBasedSignalFetchConfig": { + "ConditionExpression": "", + "TriggerMode": "" + }, "AWS::IoTFleetWise::Campaign DataDestinationConfig": { + "MqttTopicConfig": "", "S3Config": "(Optional) The Amazon S3 bucket where the AWS IoT FleetWise campaign sends data.", "TimestreamConfig": "(Optional) The Amazon Timestream table where the campaign sends data." }, + "AWS::IoTFleetWise::Campaign MqttTopicConfig": { + "ExecutionRoleArn": "", + "MqttTopicArn": "" + }, "AWS::IoTFleetWise::Campaign S3Config": { "BucketArn": "The Amazon Resource Name (ARN) of the Amazon S3 bucket.", "DataFormat": "(Optional) Specify the format that files are saved in the Amazon S3 bucket. You can save files in an Apache Parquet or JSON format.\n\n- Parquet - Store data in a columnar storage file format. Parquet is optimal for fast data retrieval and can reduce costs. This option is selected by default.\n- JSON - Store data in a standard text-based JSON file format.", "Prefix": "(Optional) Enter an S3 bucket prefix. The prefix is the string of characters after the bucket name and before the object name. You can use the prefix to organize data stored in Amazon S3 buckets. For more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) in the *Amazon Simple Storage Service User Guide* .\n\nBy default, AWS IoT FleetWise sets the prefix `processed-data/year=YY/month=MM/date=DD/hour=HH/` (in UTC) to data it delivers to Amazon S3 . You can enter a prefix to append it to this default prefix. 
For example, if you enter the prefix `vehicles` , the prefix will be `vehicles/processed-data/year=YY/month=MM/date=DD/hour=HH/` .", "StorageCompressionFormat": "(Optional) By default, stored data is compressed as a .gzip file. Compressed files have a reduced file size, which can optimize the cost of data storage." }, + "AWS::IoTFleetWise::Campaign SignalFetchConfig": { + "ConditionBased": "", + "TimeBased": "" + }, + "AWS::IoTFleetWise::Campaign SignalFetchInformation": { + "Actions": "", + "ConditionLanguageVersion": "", + "FullyQualifiedName": "", + "SignalFetchConfig": "" + }, "AWS::IoTFleetWise::Campaign SignalInformation": { "MaxSampleCount": "(Optional) The maximum number of samples to collect.", "MinimumSamplingIntervalMs": "(Optional) The minimum duration of time (in milliseconds) between two triggering events to collect data.\n\n> If a signal changes often, you might want to collect data at a slower rate.", @@ -20113,6 +21050,9 @@ "AWS::IoTFleetWise::Campaign TimeBasedCollectionScheme": { "PeriodMs": "The time period (in milliseconds) to decide how often to collect data. For example, if the time period is `60000` , the Edge Agent software collects data once every minute." }, + "AWS::IoTFleetWise::Campaign TimeBasedSignalFetchConfig": { + "ExecutionFrequencyMs": "" + }, "AWS::IoTFleetWise::Campaign TimestreamConfig": { "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the task execution role that grants AWS IoT FleetWise permission to deliver data to the Amazon Timestream table.", "TimestreamTableArn": "The Amazon Resource Name (ARN) of the Amazon Timestream table." @@ -20348,7 +21288,7 @@ "AssetModelDescription": "A description for the asset model.", "AssetModelExternalId": "The external ID of the asset model. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .", "AssetModelHierarchies": "The hierarchy definitions of the asset model. Each hierarchy specifies an asset model whose assets can be children of any other assets created from this asset model. For more information, see [Asset hierarchies](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-hierarchies.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 10 hierarchies per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", - "AssetModelName": "A unique, friendly name for the asset model.", + "AssetModelName": "A unique name for the asset model.", "AssetModelProperties": "The property definitions of the asset model. For more information, see [Asset properties](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/asset-properties.html) in the *AWS IoT SiteWise User Guide* .\n\nYou can specify up to 200 properties per asset model. For more information, see [Quotas](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/quotas.html) in the *AWS IoT SiteWise User Guide* .", "AssetModelType": "The type of asset model.\n\n- *ASSET_MODEL* \u2013 (default) An asset model that you can use to create assets. Can't be included as a component in another asset model.\n- *COMPONENT_MODEL* \u2013 A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model.", "Tags": "A list of key-value pairs that contain metadata for the asset. 
For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." @@ -20372,7 +21312,7 @@ "Name": "The name of the asset model hierarchy that you specify by using the [CreateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_CreateAssetModel.html) or [UpdateAssetModel](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_UpdateAssetModel.html) API operation." }, "AWS::IoTSiteWise::AssetModel AssetModelProperty": { - "DataType": "The data type of the asset model property.", + "DataType": "The data type of the asset model property.\n\nIf you specify `STRUCT` , you must also specify `dataTypeSpec` to identify the type of the structure for this property.", "DataTypeSpec": "The data type of the structure for this property. This parameter exists on properties that have the `STRUCT` data type.", "ExternalId": "The external ID of the asset property. For more information, see [Using external IDs](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/object-ids.html#external-ids) in the *AWS IoT SiteWise User Guide* .\n\n> One of `ExternalId` or `LogicalId` must be specified.", "Id": "The ID of the property.\n\n> This is a return value and can't be set.", @@ -20439,7 +21379,7 @@ }, "AWS::IoTSiteWise::Gateway": { "GatewayCapabilitySummaries": "A list of gateway capability summaries that each contain a namespace and status. Each gateway capability defines data sources for the gateway. To retrieve a capability configuration's definition, use [DescribeGatewayCapabilityConfiguration](https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_DescribeGatewayCapabilityConfiguration.html) .", - "GatewayName": "A unique, friendly name for the gateway.", + "GatewayName": "A unique name for the gateway.", "GatewayPlatform": "The gateway's platform. You can only specify one platform in a gateway.", "Tags": "A list of key-value pairs that contain metadata for the gateway. For more information, see [Tagging your AWS IoT SiteWise resources](https://docs.aws.amazon.com/iot-sitewise/latest/userguide/tag-resources.html) in the *AWS IoT SiteWise User Guide* ." }, @@ -20450,7 +21390,7 @@ "AWS::IoTSiteWise::Gateway GatewayPlatform": { "Greengrass": "A gateway that runs on AWS IoT Greengrass .", "GreengrassV2": "A gateway that runs on AWS IoT Greengrass V2 .", - "SiemensIE": "" + "SiemensIE": "An AWS IoT SiteWise Edge gateway that runs on a Siemens Industrial Edge Device." }, "AWS::IoTSiteWise::Gateway Greengrass": { "GroupArn": "The [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) of the Greengrass group. For more information about how to find a group's ARN, see [ListGroups](https://docs.aws.amazon.com/greengrass/v1/apireference/listgroups-get.html) and [GetGroup](https://docs.aws.amazon.com/greengrass/v1/apireference/getgroup-get.html) in the *AWS IoT Greengrass V1 API Reference* ." @@ -20459,7 +21399,7 @@ "CoreDeviceThingName": "The name of the AWS IoT thing for your AWS IoT Greengrass V2 core device." }, "AWS::IoTSiteWise::Gateway SiemensIE": { - "IotCoreThingName": "" + "IotCoreThingName": "The name of the AWS IoT Thing for your AWS IoT SiteWise Edge gateway."
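Putting the two new `SiemensIE` strings together, a minimal gateway declaration might look like this; the logical ID, gateway name, and thing name are illustrative:

```yaml
PlantFloorGateway:
  Type: AWS::IoTSiteWise::Gateway
  Properties:
    GatewayName: plant-floor-gateway
    GatewayPlatform:
      SiemensIE:
        IotCoreThingName: SiemensIEThing   # the AWS IoT thing backing the gateway
```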
}, "AWS::IoTSiteWise::Gateway Tag": { "Key": "The key or name that identifies the tag.", @@ -20657,7 +21597,7 @@ "SceneId": "The ID of the scene.", "SceneMetadata": "The scene metadata.", "Tags": "The ComponentType tags.", - "WorkspaceId": "" + "WorkspaceId": "The ID of the workspace." }, "AWS::IoTTwinMaker::SyncJob": { "SyncRole": "The SyncJob IAM role. This IAM role is used by the sync job to read from the syncSource, and create, update or delete the corresponding resources.", @@ -21899,22 +22839,23 @@ "AmazonOpenSearchServerlessDestinationConfiguration": "Describes the configuration of a destination in the Serverless offering for Amazon OpenSearch Service.", "AmazonopensearchserviceDestinationConfiguration": "The destination in Amazon OpenSearch Service. You can specify only one destination.", "DeliveryStreamEncryptionConfigurationInput": "Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side Encryption (SSE).", - "DeliveryStreamName": "The name of the delivery stream.", - "DeliveryStreamType": "The delivery stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the delivery stream directly.\n- `KinesisStreamAsSource` : The delivery stream uses a Kinesis data stream as a source.", + "DeliveryStreamName": "The name of the Firehose stream.", + "DeliveryStreamType": "The Firehose stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the Firehose stream directly.\n- `KinesisStreamAsSource` : The Firehose stream uses a Kinesis data stream as a source.", "ElasticsearchDestinationConfiguration": "An Amazon ES destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon ES destination to an Amazon S3 or Amazon Redshift destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "ExtendedS3DestinationConfiguration": "An Amazon S3 destination for the delivery stream.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Extended S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "HttpEndpointDestinationConfiguration": "Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination.", + "IcebergDestinationConfiguration": "Specifies the destination configure settings for Apache Iceberg Table.", "KinesisStreamSourceConfiguration": "When a Kinesis stream is used as the source for the delivery stream, a [KinesisStreamSourceConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisfirehose-deliverystream-kinesisstreamsourceconfiguration.html) containing the Kinesis stream ARN and the role ARN for the source stream.", "MSKSourceConfiguration": "The configuration for the Amazon MSK cluster to be used as the source for a delivery stream.", "RedshiftDestinationConfiguration": "An Amazon Redshift destination for the delivery stream.\n\nConditional. 
You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon Redshift destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "S3DestinationConfiguration": "The `S3DestinationConfiguration` property type specifies an Amazon Simple Storage Service (Amazon S3) destination to which Amazon Kinesis Data Firehose (Kinesis Data Firehose) delivers data.\n\nConditional. You must specify only one destination configuration.\n\nIf you change the delivery stream destination from an Amazon S3 destination to an Amazon ES destination, update requires [some interruptions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-some-interrupt) .", "SnowflakeDestinationConfiguration": "Configure Snowflake destination", "SplunkDestinationConfiguration": "The configuration of a destination in Splunk for the delivery stream.", - "Tags": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)" + "Tags": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. 
If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as the following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)" }, "AWS::KinesisFirehose::DeliveryStream AmazonOpenSearchServerlessBufferingHints": { "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300 (5 minutes).", - "SizeInMBs": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher." + "SizeInMBs": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher." }, "AWS::KinesisFirehose::DeliveryStream AmazonOpenSearchServerlessDestinationConfiguration": { "BufferingHints": "The buffering options. If no value is specified, the default values for AmazonopensearchserviceBufferingHints are used.", @@ -21962,6 +22903,9 @@ "IntervalInSeconds": "The length of time, in seconds, that Kinesis Data Firehose buffers incoming data before delivering it to the destination. For valid values, see the `IntervalInSeconds` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* .", "SizeInMBs": "The size of the buffer, in MBs, that Kinesis Data Firehose uses for incoming data before delivering it to the destination. For valid values, see the `SizeInMBs` content for the [BufferingHints](https://docs.aws.amazon.com/firehose/latest/APIReference/API_BufferingHints.html) data type in the *Amazon Kinesis Data Firehose API Reference* ." }, + "AWS::KinesisFirehose::DeliveryStream CatalogConfiguration": { + "CatalogArn": "Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format `arn:aws:glue:region:account-id:catalog` ." + }, "AWS::KinesisFirehose::DeliveryStream CloudWatchLoggingOptions": { "Enabled": "Indicates whether CloudWatch Logs logging is enabled.", "LogGroupName": "The name of the CloudWatch Logs log group that contains the log stream that Kinesis Data Firehose will use.\n\nConditional. If you enable logging, you must specify this property.", @@ -21986,6 +22930,12 @@ "HiveJsonSerDe": "The native Hive / HCatalog JsonSerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need.
The other option is the OpenX SerDe.", "OpenXJsonSerDe": "The OpenX SerDe. Used by Firehose for deserializing data, which means converting it from the JSON format in preparation for serializing it to the Parquet or ORC format. This is one of two deserializers you can choose, depending on which one offers the functionality you need. The other option is the native Hive / HCatalog JsonSerDe." }, + "AWS::KinesisFirehose::DeliveryStream DestinationTableConfiguration": { + "DestinationDatabaseName": "The name of the Apache Iceberg database.", + "DestinationTableName": "Specifies the name of the Apache Iceberg Table.", + "S3ErrorOutputPrefix": "The table-specific S3 error output prefix. All errors that occur while delivering to this table will be prefixed with this value in the S3 destination.", + "UniqueKeys": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table." + }, "AWS::KinesisFirehose::DeliveryStream DocumentIdOptions": { "DefaultDocumentIdFormat": "When the `FIREHOSE_DEFAULT` option is chosen, Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs.\n\nWhen the `NO_DOCUMENT_ID` option is chosen, Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume fewer resources in the Amazon OpenSearch Service domain, resulting in improved performance." }, @@ -22023,7 +22973,7 @@ "AWS::KinesisFirehose::DeliveryStream ExtendedS3DestinationConfiguration": { "BucketARN": "The Amazon Resource Name (ARN) of the Amazon S3 bucket. For constraints, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "BufferingHints": "The buffering option.", - "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your delivery stream.", + "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your Firehose stream.", "CompressionFormat": "The compression format. If no value is specified, the default is `UNCOMPRESSED` .", "CustomTimeZone": "The time zone you prefer. UTC is the default.", "DataFormatConversionConfiguration": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.", @@ -22035,7 +22985,7 @@ "ProcessingConfiguration": "The data processing configuration for the Kinesis Data Firehose delivery stream.", "RoleARN": "The Amazon Resource Name (ARN) of the AWS credentials. For constraints, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", "S3BackupConfiguration": "The configuration for backup in Amazon S3.", - "S3BackupMode": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled.
If backup is enabled, you can't update the delivery stream to disable it." + "S3BackupMode": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it." }, "AWS::KinesisFirehose::DeliveryStream HiveJsonSerDe": { "TimestampFormats": "Indicates how you want Firehose to parse the date and timestamps that may be present in your input data JSON. To specify these format strings, follow the pattern syntax of JodaTime's DateTimeFormat format strings. For more information, see [Class DateTimeFormat](https://docs.aws.amazon.com/https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html) . You can also use the special value `millis` to parse timestamps in epoch milliseconds. If you don't specify a format, Firehose uses `java.sql.Timestamp::valueOf` by default." @@ -22065,6 +23015,17 @@ "CommonAttributes": "Describes the metadata sent to the HTTP endpoint destination.", "ContentEncoding": "Kinesis Data Firehose uses the content encoding to compress the body of a request before sending the request to the destination. For more information, see Content-Encoding in MDN Web Docs, the official Mozilla documentation." }, + "AWS::KinesisFirehose::DeliveryStream IcebergDestinationConfiguration": { + "BufferingHints": "", + "CatalogConfiguration": "Configuration describing where the destination Apache Iceberg Tables are persisted.", + "CloudWatchLoggingOptions": "", + "DestinationTableConfigurationList": "Provides a list of `DestinationTableConfigurations` which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table-specific configuration is not provided here.", + "ProcessingConfiguration": "", + "RetryOptions": "", + "RoleARN": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.", + "S3Configuration": "", + "s3BackupMode": "Describes how Firehose will back up records. Currently, S3 backup only supports `FailedDataOnly` ." + }, "AWS::KinesisFirehose::DeliveryStream InputFormatConfiguration": { "Deserializer": "Specifies which deserializer to use. You can choose either the Apache Hive JSON SerDe or the OpenX JSON SerDe. If both are non-null, the server rejects the request." }, @@ -22078,6 +23039,7 @@ "AWS::KinesisFirehose::DeliveryStream MSKSourceConfiguration": { "AuthenticationConfiguration": "The authentication configuration of the Amazon MSK cluster.", "MSKClusterARN": "The ARN of the Amazon MSK cluster.", + "ReadFromTimestamp": "The start date and time in UTC for the offset position within your MSK topic from where Firehose begins to read. By default, this is set to the timestamp when Firehose becomes Active.\n\nIf you want to create a Firehose stream with Earliest start position from SDK or CLI, you need to set the `ReadFromTimestamp` parameter to Epoch (1970-01-01T00:00:00Z).", "TopicName": "The topic name within the Amazon MSK cluster." }, "AWS::KinesisFirehose::DeliveryStream OpenXJsonSerDe": { @@ -22121,7 +23083,7 @@ "ParameterValue": "The parameter value."
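The Iceberg entries added above (`CatalogConfiguration`, `DestinationTableConfiguration`, and `IcebergDestinationConfiguration`) nest together under a delivery stream. A minimal sketch of that shape as a Python dict in CloudFormation template form, using only the property names documented in this patch; every ARN, database, and table name is a placeholder:

```python
# Sketch only: property names come from the documentation entries above;
# all ARNs and names below are placeholders, not a working example.
iceberg_delivery_stream = {
    "Type": "AWS::KinesisFirehose::DeliveryStream",
    "Properties": {
        "IcebergDestinationConfiguration": {
            # Role Firehose assumes when calling Apache Iceberg Tables.
            "RoleARN": "arn:aws:iam::123456789012:role/FirehoseIcebergRole",
            # Glue catalog that owns the destination Iceberg tables, in the
            # documented arn:aws:glue:region:account-id:catalog format.
            "CatalogConfiguration": {
                "CatalogArn": "arn:aws:glue:us-east-1:123456789012:catalog"
            },
            # Without this list, Firehose falls back to insert-only writes.
            "DestinationTableConfigurationList": [
                {
                    "DestinationDatabaseName": "analytics",
                    "DestinationTableName": "events",
                    "S3ErrorOutputPrefix": "errors/events/",
                    # Keys Firehose uses for Create/Update/Delete operations.
                    "UniqueKeys": ["event_id"],
                }
            ],
            # Per the entry above, only FailedDataOnly is supported today.
            "s3BackupMode": "FailedDataOnly",
            "S3Configuration": {
                "BucketARN": "arn:aws:s3:::my-firehose-error-bucket",
                "RoleARN": "arn:aws:iam::123456789012:role/FirehoseIcebergRole",
            },
        },
    },
}
```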
}, "AWS::KinesisFirehose::DeliveryStream RedshiftDestinationConfiguration": { - "CloudWatchLoggingOptions": "The CloudWatch logging options for your delivery stream.", + "CloudWatchLoggingOptions": "The CloudWatch logging options for your Firehose stream.", "ClusterJDBCURL": "The connection string that Kinesis Data Firehose uses to connect to the Amazon Redshift cluster.", "CopyCommand": "Configures the Amazon Redshift `COPY` command that Kinesis Data Firehose uses to load data into the cluster from the Amazon S3 bucket.", "Password": "The password for the Amazon Redshift user that you specified in the `Username` property.", @@ -22129,7 +23091,7 @@ "RetryOptions": "The retry behavior in case Firehose is unable to deliver documents to Amazon Redshift. Default value is 3600 (60 minutes).", "RoleARN": "The ARN of the AWS Identity and Access Management (IAM) role that grants Kinesis Data Firehose access to your Amazon S3 bucket and AWS KMS (if you enable data encryption). For more information, see [Grant Kinesis Data Firehose Access to an Amazon Redshift Destination](https://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-rs) in the *Amazon Kinesis Data Firehose Developer Guide* .", "S3BackupConfiguration": "The configuration for backup in Amazon S3.", - "S3BackupMode": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.", + "S3BackupMode": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.", "S3Configuration": "The S3 bucket where Kinesis Data Firehose first delivers data. After the data is in the bucket, Kinesis Data Firehose uses the `COPY` command to load the data into the Amazon Redshift cluster. For the Amazon S3 bucket's compression format, don't specify `SNAPPY` or `ZIP` because the Amazon Redshift `COPY` command doesn't support them.", "SecretsManagerConfiguration": "The configuration that defines how you access secrets for Amazon Redshift.", "Username": "The Amazon Redshift user that has permission to access the Amazon Redshift cluster. This user must have `INSERT` privileges for copying data from the Amazon S3 bucket to the cluster." @@ -22143,7 +23105,7 @@ "AWS::KinesisFirehose::DeliveryStream S3DestinationConfiguration": { "BucketARN": "The Amazon Resource Name (ARN) of the Amazon S3 bucket to send data to.", "BufferingHints": "Configures how Kinesis Data Firehose buffers incoming data while delivering it to the Amazon S3 bucket.", - "CloudWatchLoggingOptions": "The CloudWatch logging options for your delivery stream.", + "CloudWatchLoggingOptions": "The CloudWatch logging options for your Firehose stream.", "CompressionFormat": "The type of compression that Kinesis Data Firehose uses to compress the data that it delivers to the Amazon S3 bucket. For valid values, see the `CompressionFormat` content for the [S3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_S3DestinationConfiguration.html) data type in the *Amazon Kinesis Data Firehose API Reference* .", "EncryptionConfiguration": "Configures Amazon Simple Storage Service (Amazon S3) server-side encryption. 
Kinesis Data Firehose uses AWS Key Management Service ( AWS KMS) to encrypt the data that it delivers to your Amazon S3 bucket.", "ErrorOutputPrefix": "A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see [Custom Prefixes for Amazon S3 Objects](https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) .", @@ -22159,16 +23121,21 @@ "VersionId": "Specifies the table version for the output data schema. If you don't specify this version ID, or if you set it to `LATEST` , Firehose uses the most recent version. This means that any updates to the table are automatically picked up." }, "AWS::KinesisFirehose::DeliveryStream SecretsManagerConfiguration": { - "Enabled": "Specifies whether you want to use the the secrets manager feature. When set as `True` the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to `False` Firehose falls back to the credentials in the destination configuration.", + "Enabled": "Specifies whether you want to use the secrets manager feature. When set as `True` the secrets manager configuration overwrites the existing secrets in the destination configuration. When it's set to `False` Firehose falls back to the credentials in the destination configuration.", "RoleARN": "Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination specific role defined in the destination configuration. If you do not provide the role, then we use the destination specific role. This parameter is required for Splunk.", - "SecretARN": "The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when *Enabled* is set to `True` ." + "SecretARN": "The ARN of the secret that stores your credentials. It must be in the same region as the Firehose stream and the role. The secret ARN can reside in a different account than the Firehose stream and role as Firehose supports cross-account secret access. This parameter is required when *Enabled* is set to `True` ." }, "AWS::KinesisFirehose::DeliveryStream Serializer": { "OrcSerDe": "A serializer to use for converting data to the ORC format before storing it in Amazon S3. For more information, see [Apache ORC](https://docs.aws.amazon.com/https://orc.apache.org/docs/) .", "ParquetSerDe": "A serializer to use for converting data to the Parquet format before storing it in Amazon S3. For more information, see [Apache Parquet](https://docs.aws.amazon.com/https://parquet.apache.org/documentation/latest/) ." }, + "AWS::KinesisFirehose::DeliveryStream SnowflakeBufferingHints": { + "IntervalInSeconds": "Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 0.", + "SizeInMBs": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 128." + }, "AWS::KinesisFirehose::DeliveryStream SnowflakeDestinationConfiguration": { "AccountUrl": "URL for accessing your Snowflake account. This URL must include your [account identifier](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/admin-account-identifier) .
Note that the protocol (https://) and port number are optional.", + "BufferingHints": "Describes the buffering to perform before delivering data to the Snowflake destination. If you do not specify any value, Firehose uses the default values.", "CloudWatchLoggingOptions": "", "ContentColumnName": "The name of the record content column", "DataLoadingOption": "Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.", @@ -22176,7 +23143,7 @@ "KeyPassphrase": "Passphrase to decrypt the private key when the key is encrypted. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", "MetaDataColumnName": "The name of the record metadata column", "PrivateKey": "The private key used to encrypt your Snowflake client. For information, see [Using Key Pair Authentication & Key Rotation](https://docs.aws.amazon.com/https://docs.snowflake.com/en/user-guide/data-load-snowpipe-streaming-configuration#using-key-pair-authentication-key-rotation) .", - "ProcessingConfiguration": "", + "ProcessingConfiguration": "Specifies configuration for Snowflake.", "RetryOptions": "The time period where Firehose will retry sending data to the chosen HTTP endpoint.", "RoleARN": "The Amazon Resource Name (ARN) of the Snowflake role", "S3BackupMode": "Choose an S3 backup mode", @@ -22204,7 +23171,7 @@ }, "AWS::KinesisFirehose::DeliveryStream SplunkDestinationConfiguration": { "BufferingHints": "The buffering options. If no value is specified, the default values for Splunk are used.", - "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your delivery stream.", + "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your Firehose stream.", "HECAcknowledgmentTimeoutInSeconds": "The amount of time that Firehose waits to receive an acknowledgment from Splunk after it sends it data. At the end of the timeout period, Firehose either tries to send the data again or considers it an error, based on your retry settings.", "HECEndpoint": "The HTTP Event Collector (HEC) endpoint to which Firehose sends your data.", "HECEndpointType": "This type can be either `Raw` or `Event` .", @@ -22451,7 +23418,8 @@ "AWS::Lambda::CodeSigningConfig": { "AllowedPublishers": "List of allowed publishers.", "CodeSigningPolicies": "The code signing policy controls the validation failure action for signature mismatch or expiry.", - "Description": "Code signing configuration description." + "Description": "Code signing configuration description.", + "Tags": "A list of tags to add to the code signing configuration.\n\n> You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update." }, "AWS::Lambda::CodeSigningConfig AllowedPublishers": { "SigningProfileVersionArns": "The Amazon Resource Name (ARN) for each of the signing profiles. A signing profile defines a trusted user who can sign a code package." 
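The `AWS::Lambda::CodeSigningConfig` entries above gain a `Tags` property carrying the same `lambda:TagResource` permission caveat as functions and event source mappings. A minimal sketch, assuming CloudFormation template shape expressed as a Python dict; the signing profile ARN and tag values are placeholders:

```python
# Sketch only: reflects the AllowedPublishers, CodeSigningPolicies, and
# newly documented Tags properties; ARN and tag values are placeholders.
code_signing_config = {
    "Type": "AWS::Lambda::CodeSigningConfig",
    "Properties": {
        "Description": "Signing config for production functions",
        "AllowedPublishers": {
            "SigningProfileVersionArns": [
                "arn:aws:signer:us-east-1:123456789012:/signing-profiles/MyProfile/abcd1234"
            ]
        },
        "CodeSigningPolicies": {
            # Enforce blocks deployments that fail signature validation;
            # Warn (the default) allows them and writes a CloudWatch log.
            "UntrustedArtifactOnDeployment": "Enforce"
        },
        # The deploying principal needs lambda:TagResource,
        # lambda:UntagResource, and lambda:ListTags, per the note above.
        "Tags": [{"Key": "team", "Value": "payments"}],
    },
}
```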
@@ -22459,6 +23427,10 @@ "AWS::Lambda::CodeSigningConfig CodeSigningPolicies": { "UntrustedArtifactOnDeployment": "Code signing configuration policy for deployment validation failure. If you set the policy to `Enforce` , Lambda blocks the deployment request if signature validation checks fail. If you set the policy to `Warn` , Lambda allows the deployment and creates a CloudWatch log.\n\nDefault value: `Warn`" }, + "AWS::Lambda::CodeSigningConfig Tag": { + "Key": "The key for this tag.", + "Value": "The value for this tag." + }, "AWS::Lambda::EventInvokeConfig": { "DestinationConfig": "A destination for events after they have been sent to a function for processing.\n\n**Destinations** - *Function* - The Amazon Resource Name (ARN) of a Lambda function.\n- *Queue* - The ARN of a standard SQS queue.\n- *Topic* - The ARN of a standard SNS topic.\n- *Event Bus* - The ARN of an Amazon EventBridge event bus.", "FunctionName": "The name of the Lambda function.\n\n*Minimum* : `1`\n\n*Maximum* : `64`\n\n*Pattern* : `([a-zA-Z0-9-_]+)`", @@ -22486,7 +23458,8 @@ "EventSourceArn": "The Amazon Resource Name (ARN) of the event source.\n\n- *Amazon Kinesis* \u2013 The ARN of the data stream or a stream consumer.\n- *Amazon DynamoDB Streams* \u2013 The ARN of the stream.\n- *Amazon Simple Queue Service* \u2013 The ARN of the queue.\n- *Amazon Managed Streaming for Apache Kafka* \u2013 The ARN of the cluster or the ARN of the VPC connection (for [cross-account event source mappings](https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html#msk-multi-vpc) ).\n- *Amazon MQ* \u2013 The ARN of the broker.\n- *Amazon DocumentDB* \u2013 The ARN of the DocumentDB change stream.", "FilterCriteria": "An object that defines the filter criteria that determine whether Lambda should process an event. For more information, see [Lambda event filtering](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html) .", "FunctionName": "The name or ARN of the Lambda function.\n\n**Name formats** - *Function name* \u2013 `MyFunction` .\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction` .\n- *Version or Alias ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD` .\n- *Partial ARN* \u2013 `123456789012:function:MyFunction` .\n\nThe length constraint applies only to the full ARN. 
If you specify only the function name, it's limited to 64 characters in length.", - "FunctionResponseTypes": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "FunctionResponseTypes": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that Lambda uses to encrypt your function's [filter criteria](https://docs.aws.amazon.com/lambda/latest/dg/invocation-eventfiltering.html#filtering-basics) .", "MaximumBatchingWindowInSeconds": "The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.\n\n*Default ( Kinesis , DynamoDB , Amazon SQS event sources)* : 0\n\n*Default ( Amazon MSK , Kafka, Amazon MQ , Amazon DocumentDB event sources)* : 500 ms\n\n*Related setting:* For Amazon SQS event sources, when you set `BatchSize` to a value greater than 10, you must set `MaximumBatchingWindowInSeconds` to at least 1.", "MaximumRecordAgeInSeconds": "(Kinesis and DynamoDB Streams only) Discard records older than the specified age. The default value is -1,\nwhich sets the maximum age to infinite. When the value is set to infinite, Lambda never discards old records.\n\n> The minimum valid value for maximum record age is 60s. Although values less than 60 and greater than -1 fall within the parameter's absolute range, they are not allowed", "MaximumRetryAttempts": "(Kinesis and DynamoDB Streams only) Discard records after the specified number of retries. The default value is -1,\nwhich sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, Lambda retries failed records until the record expires in the event source.", @@ -22498,6 +23471,7 @@ "SourceAccessConfigurations": "An array of the authentication protocol, VPC components, or virtual host to secure and define your event source.", "StartingPosition": "The position in a stream from which to start reading. Required for Amazon Kinesis and Amazon DynamoDB.\n\n- *LATEST* - Read only new records.\n- *TRIM_HORIZON* - Process all available records.\n- *AT_TIMESTAMP* - Specify a time from which to start reading records.", "StartingPositionTimestamp": "With `StartingPosition` set to `AT_TIMESTAMP` , the time from which to start reading, in Unix time seconds. `StartingPositionTimestamp` cannot be in the future.", + "Tags": "A list of tags to add to the event source mapping.\n\n> You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", "Topics": "The name of the Kafka topic.", "TumblingWindowInSeconds": "(Kinesis and DynamoDB Streams only) The duration in seconds of a processing window for DynamoDB and Kinesis Streams event sources. A value of 0 seconds indicates no tumbling window." }, @@ -22537,9 +23511,13 @@ "Type": "The type of authentication protocol, VPC components, or virtual host for your event source. 
For example: `\"Type\":\"SASL_SCRAM_512_AUTH\"` .\n\n- `BASIC_AUTH` \u2013 (Amazon MQ) The AWS Secrets Manager secret that stores your broker credentials.\n- `BASIC_AUTH` \u2013 (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL/PLAIN authentication of your Apache Kafka brokers.\n- `VPC_SUBNET` \u2013 (Self-managed Apache Kafka) The subnets associated with your VPC. Lambda connects to these subnets to fetch data from your self-managed Apache Kafka cluster.\n- `VPC_SECURITY_GROUP` \u2013 (Self-managed Apache Kafka) The VPC security group used to manage access to your self-managed Apache Kafka brokers.\n- `SASL_SCRAM_256_AUTH` \u2013 (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-256 authentication of your self-managed Apache Kafka brokers.\n- `SASL_SCRAM_512_AUTH` \u2013 (Amazon MSK, Self-managed Apache Kafka) The Secrets Manager ARN of your secret key used for SASL SCRAM-512 authentication of your self-managed Apache Kafka brokers.\n- `VIRTUAL_HOST` \u2013- (RabbitMQ) The name of the virtual host in your RabbitMQ broker. Lambda uses this RabbitMQ host as the event source. This property cannot be specified in an UpdateEventSourceMapping API call.\n- `CLIENT_CERTIFICATE_TLS_AUTH` \u2013 (Amazon MSK, self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the certificate chain (X.509 PEM), private key (PKCS#8 PEM), and private key password (optional) used for mutual TLS authentication of your MSK/Apache Kafka brokers.\n- `SERVER_ROOT_CA_CERTIFICATE` \u2013 (Self-managed Apache Kafka) The Secrets Manager ARN of your secret key containing the root CA certificate (X.509 PEM) used for TLS encryption of your Apache Kafka brokers.", "URI": "The value for your chosen configuration in `Type` . For example: `\"URI\": \"arn:aws:secretsmanager:us-east-1:01234567890:secret:MyBrokerSecretName\"` ." }, + "AWS::Lambda::EventSourceMapping Tag": { + "Key": "The key for this tag.", + "Value": "The value for this tag." + }, "AWS::Lambda::Function": { "Architectures": "The instruction set architecture that the function supports. Enter a string array with one of the valid values (arm64 or x86_64). The default value is `x86_64` .", - "Code": "The code for the function.", + "Code": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "CodeSigningConfigArn": "To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration\nincludes a set of signing profiles, which define the trusted publishers for this function.", "DeadLetterConfig": "A dead-letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events when they fail processing. For more information, see [Dead-letter queues](https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#invocation-dlq) .", "Description": "A description of the function.", @@ -22549,17 +23527,18 @@ "FunctionName": "The name of the Lambda function, up to 64 characters in length. 
If you don't specify a name, AWS CloudFormation generates one.\n\nIf you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "Handler": "The name of the method within your code that Lambda calls to run your function. Handler is required if the deployment package is a .zip file archive. The format includes the file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information, see [Lambda programming model](https://docs.aws.amazon.com/lambda/latest/dg/foundation-progmodel.html) .", "ImageConfig": "Configuration values that override the container image Dockerfile settings. For more information, see [Container image settings](https://docs.aws.amazon.com/lambda/latest/dg/images-create.html#images-parms) .", - "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR).\nIf you don't provide a customer managed key, Lambda uses a default service key.", + "KmsKeyArn": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ). If you don't provide a customer managed key, Lambda uses a default service key.", "Layers": "A list of [function layers](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html) to add to the function's execution environment. Specify each layer by its ARN, including the version.", "LoggingConfig": "The function's Amazon CloudWatch Logs configuration settings.", "MemorySize": "The amount of [memory available to the function](https://docs.aws.amazon.com/lambda/latest/dg/configuration-function-common.html#configuration-memory-console) at runtime. Increasing the function memory also increases its CPU allocation. The default value is 128 MB. The value can be any multiple of 1 MB. Note that new AWS accounts have reduced concurrency and memory quotas. AWS raises these quotas automatically based on your usage. You can also request a quota increase.", "PackageType": "The type of deployment package.
Set to `Image` for container image and set to `Zip` for .zip file archive.", + "RecursiveLoop": "The status of your function's recursive loop detection configuration.\n\nWhen this value is set to `Allow` and Lambda detects your function being invoked as part of a recursive loop, it doesn't take any action.\n\nWhen this value is set to `Terminate` and Lambda detects your function being invoked as part of a recursive loop, it stops your function from being invoked and notifies you.", "ReservedConcurrentExecutions": "The number of simultaneous executions to reserve for the function.", "Role": "The Amazon Resource Name (ARN) of the function's execution role.", "Runtime": "The identifier of the function's [runtime](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html) . Runtime is required if the deployment package is a .zip file archive. Specifying a runtime results in an error if you're deploying a function using a container image.\n\nThe following list includes deprecated runtimes. Lambda blocks creating new functions and updating existing functions shortly after each runtime is deprecated. For more information, see [Runtime use after deprecation](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtime-deprecation-levels) .\n\nFor a list of all currently supported runtimes, see [Supported runtimes](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html#runtimes-supported) .", "RuntimeManagementConfig": "Sets the runtime management configuration for a function's version. For more information, see [Runtime updates](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-update.html) .", "SnapStart": "The function's [AWS Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) setting.", - "Tags": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.", + "Tags": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.\n\n> You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", "Timeout": "The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more information, see [Lambda execution environment](https://docs.aws.amazon.com/lambda/latest/dg/runtimes-context.html) .", "TracingConfig": "Set `Mode` to `Active` to sample and trace a subset of incoming requests with [X-Ray](https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html) .", "VpcConfig": "For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC. When you connect a function to a VPC, it can access resources and the internet only through that VPC. For more information, see [Configuring a Lambda function to access resources in a VPC](https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html) ." @@ -22569,6 +23548,7 @@ "S3Bucket": "An Amazon S3 bucket in the same AWS Region as your function.
The bucket can be in a different AWS account .", "S3Key": "The Amazon S3 key of the deployment package.", "S3ObjectVersion": "For versioned objects, the version of the deployment package object to use.", + "SourceKMSKeyArn": "", "ZipFile": "(Node.js and Python) The source code of your Lambda function. If you include your function source inline with this parameter, AWS CloudFormation places it in a file named `index` and zips it to create a [deployment package](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-package.html) . This zip file cannot exceed 4MB. For the `Handler` property, the first part of the handler identifier must be `index` . For example, `index.handler` .\n\nFor JSON, you must escape quotes and special characters such as newline ( `\\n` ) with a backslash.\n\nIf you specify a function that interacts with an AWS CloudFormation custom resource, you don't have to write your own functions to send responses to the custom resource that invoked the function. AWS CloudFormation provides a response module ( [cfn-response](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html) ) that simplifies sending responses. See [Using AWS Lambda with AWS CloudFormation](https://docs.aws.amazon.com/lambda/latest/dg/services-cloudformation.html) for details." }, "AWS::Lambda::Function DeadLetterConfig": { @@ -22607,8 +23587,8 @@ "OptimizationStatus": "When you provide a [qualified Amazon Resource Name (ARN)](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html#versioning-versions-using) , this response element indicates whether SnapStart is activated for the specified function version." }, "AWS::Lambda::Function Tag": { - "Key": "", - "Value": "" + "Key": "The key for this tag.", + "Value": "The value for this tag." }, "AWS::Lambda::Function TracingConfig": { "Mode": "The tracing mode." @@ -22642,7 +23622,7 @@ "EventSourceToken": "For Alexa Smart Home functions, a token that the invoker must supply.", "FunctionName": "The name or ARN of the Lambda function, version, or alias.\n\n**Name formats** - *Function name* \u2013 `my-function` (name-only), `my-function:v1` (with alias).\n- *Function ARN* \u2013 `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* \u2013 `123456789012:function:my-function` .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "FunctionUrlAuthType": "The type of authentication that your function URL uses. Set to `AWS_IAM` if you want to restrict access to authenticated users only. Set to `NONE` if you want to bypass IAM authentication to create a public endpoint. For more information, see [Security and auth model for Lambda function URLs](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html) .", - "Principal": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "Principal": "The AWS service , AWS account , IAM user, or IAM role that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "PrincipalOrgID": "The identifier for your organization in AWS Organizations . 
Use this to grant permissions to all the AWS accounts under this organization.", "SourceAccount": "For AWS service , the ID of the AWS account that owns the resource. Use this together with `SourceArn` to ensure that the specified account owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and recreated by another account.", "SourceArn": "For AWS services , the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.\n\nNote that Lambda configures the comparison using the `StringLike` operator." @@ -22652,7 +23632,7 @@ "Cors": "The [Cross-Origin Resource Sharing (CORS)](https://docs.aws.amazon.com/https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) settings for your function URL.", "InvokeMode": "Use one of the following options:\n\n- `BUFFERED` \u2013 This is the default option. Lambda invokes your function using the `Invoke` API operation. Invocation results are available when the payload is complete. The maximum payload size is 6 MB.\n- `RESPONSE_STREAM` \u2013 Your function streams payload results as they become available. Lambda invokes your function using the `InvokeWithResponseStream` API operation. The maximum response payload size is 20 MB, however, you can [request a quota increase](https://docs.aws.amazon.com/servicequotas/latest/userguide/request-quota-increase.html) .", "Qualifier": "The alias name.", - "TargetFunctionArn": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* - `123456789012:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length." + "TargetFunctionArn": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* - `123456789012:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length." }, "AWS::Lambda::Url Cors": { "AllowCredentials": "Whether you want to allow cookies or other credentials in requests to your function URL. The default is `false` .", @@ -22739,7 +23719,7 @@ "LocaleId": "The identifier of the language and locale that the bot will be used in. The string must match one of the supported locales.", "NluConfidenceThreshold": "Determines the threshold where Amazon Lex will insert the `AMAZON.FallbackIntent` , `AMAZON.KendraSearchIntent` , or both when returning alternative intents. You must configure an `AMAZON.FallbackIntent` . `AMAZON.KendraSearchIntent` is only inserted if it is configured for the bot.", "SlotTypes": "One or more slot types defined for the locale.", - "VoiceSettings": "Defines settings for using an Amazon Polly voice to communicate with a user." + "VoiceSettings": "Defines settings for using an Amazon Polly voice to communicate with a user.\n\nValid values include:\n\n- `standard`\n- `neural`\n- `long-form`\n- `generative`" }, "AWS::Lex::Bot Button": { "Text": "The text that appears on the button.
Use this to tell the user what value is returned when they choose this button.", @@ -23707,7 +24687,7 @@ }, "AWS::Logs::QueryDefinition": { "LogGroupNames": "Use this parameter if you want the query to query only certain log groups.", - "Name": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", + "Name": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `*folder-name* / *query-name*` .", "QueryString": "The query string to use for this query definition. For more information, see [CloudWatch Logs Insights Query Syntax](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html) ." }, "AWS::Logs::ResourcePolicy": { @@ -23918,147 +24898,147 @@ "SecretArnList": "" }, "AWS::MSK::Cluster": { - "BrokerNodeGroupInfo": "Information about the broker nodes in the cluster.", - "ClientAuthentication": "Includes all client authentication related information.", - "ClusterName": "The name of the cluster.", - "ConfigurationInfo": "Represents the configuration that you want MSK to use for the cluster.", - "CurrentVersion": "The version of the cluster that you want to update.", - "EncryptionInfo": "Includes all encryption-related information.", - "EnhancedMonitoring": "Specifies the level of monitoring for the MSK cluster. The possible values are `DEFAULT` , `PER_BROKER` , and `PER_TOPIC_PER_BROKER` .", - "KafkaVersion": "The version of Apache Kafka. You can use Amazon MSK to create clusters that use Apache Kafka versions 1.1.1 and 2.2.1.", - "LoggingInfo": "Logging Info details.", - "NumberOfBrokerNodes": "The number of broker nodes in the cluster.", - "OpenMonitoring": "The settings for open monitoring.", - "StorageMode": "This controls storage mode for supported storage tiers.", - "Tags": "Create tags when creating the cluster." + "BrokerNodeGroupInfo": "", + "ClientAuthentication": "", + "ClusterName": "", + "ConfigurationInfo": "", + "CurrentVersion": "", + "EncryptionInfo": "", + "EnhancedMonitoring": "", + "KafkaVersion": "", + "LoggingInfo": "", + "NumberOfBrokerNodes": "", + "OpenMonitoring": "", + "StorageMode": "", + "Tags": "" }, "AWS::MSK::Cluster BrokerLogs": { - "CloudWatchLogs": "Details of the CloudWatch Logs destination for broker logs.", - "Firehose": "Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs.", - "S3": "Details of the Amazon S3 destination for broker logs." + "CloudWatchLogs": "", + "Firehose": "", + "S3": "" }, "AWS::MSK::Cluster BrokerNodeGroupInfo": { - "BrokerAZDistribution": "This parameter is currently not in use.", - "ClientSubnets": "The list of subnets to connect to in the client virtual private cloud (VPC). Amazon creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data.\n\nIf you use the US West (N. California) Region, specify exactly two subnets. For other Regions where Amazon MSK is available, you can specify either two or three subnets. The subnets that you specify must be in distinct Availability Zones. 
When you create a cluster, Amazon MSK distributes the broker nodes evenly across the subnets that you specify.\n\nClient subnets can't occupy the Availability Zone with ID `use1-az3` .", - "ConnectivityInfo": "Information about the cluster's connectivity setting.", + "BrokerAZDistribution": "", + "ClientSubnets": "", + "ConnectivityInfo": "", "InstanceType": "The type of Amazon EC2 instances to use for brokers. The following instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, kafka.m5.4xlarge, kafka.m5.8xlarge, kafka.m5.12xlarge, kafka.m5.16xlarge, kafka.m5.24xlarge, and kafka.t3.small.", - "SecurityGroups": "The security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster. If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC. If you specify security groups that were shared with you, you must ensure that you have permissions to them. Specifically, you need the `ec2:DescribeSecurityGroups` permission.", - "StorageInfo": "Contains information about storage volumes attached to Amazon MSK broker nodes." + "SecurityGroups": "", + "StorageInfo": "" }, "AWS::MSK::Cluster ClientAuthentication": { - "Sasl": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true.", - "Tls": "Details for ClientAuthentication using TLS. To turn on TLS access control, you must also turn on `EncryptionInTransit` by setting `inCluster` to true and `clientBroker` to `TLS` .", - "Unauthenticated": "Details for ClientAuthentication using no authentication." + "Sasl": "", + "Tls": "", + "Unauthenticated": "" }, "AWS::MSK::Cluster CloudWatchLogs": { - "Enabled": "Specifies whether broker logs get sent to the specified CloudWatch Logs destination.", - "LogGroup": "The CloudWatch log group that is the destination for broker logs." + "Enabled": "", + "LogGroup": "" }, "AWS::MSK::Cluster ConfigurationInfo": { - "Arn": "ARN of the configuration to use.", - "Revision": "The revision of the configuration to use." + "Arn": "", + "Revision": "" }, "AWS::MSK::Cluster ConnectivityInfo": { - "PublicAccess": "Access control settings for the cluster's brokers.", - "VpcConnectivity": "VPC connection control settings for brokers" + "PublicAccess": "", + "VpcConnectivity": "" }, "AWS::MSK::Cluster EBSStorageInfo": { - "ProvisionedThroughput": "EBS volume provisioned throughput information.", - "VolumeSize": "The size in GiB of the EBS volume for the data drive on each broker node." + "ProvisionedThroughput": "", + "VolumeSize": "" }, "AWS::MSK::Cluster EncryptionAtRest": { - "DataVolumeKMSKeyId": "The Amazon Resource Name (ARN) of the Amazon KMS key for encrypting data at rest. If you don't specify a KMS key, MSK creates one for you and uses it." + "DataVolumeKMSKeyId": "" }, "AWS::MSK::Cluster EncryptionInTransit": { - "ClientBroker": "Indicates the encryption setting for data in transit between clients and brokers. 
You must set it to one of the following values.\n\n`TLS` means that client-broker communication is enabled with TLS only.\n\n`TLS_PLAINTEXT` means that client-broker communication is enabled for both TLS-encrypted, as well as plaintext data.\n\n`PLAINTEXT` means that client-broker communication is enabled in plaintext only.\n\nThe default value is `TLS` .", - "InCluster": "When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted. When set to false, the communication happens in plaintext.\n\nThe default value is true." + "ClientBroker": "", + "InCluster": "" }, "AWS::MSK::Cluster EncryptionInfo": { - "EncryptionAtRest": "The data-volume encryption details.", - "EncryptionInTransit": "The details for encryption in transit." + "EncryptionAtRest": "", + "EncryptionInTransit": "" }, "AWS::MSK::Cluster Firehose": { - "DeliveryStream": "The Kinesis Data Firehose delivery stream that is the destination for broker logs.", - "Enabled": "Specifies whether broker logs get sent to the specified Kinesis Data Firehose delivery stream." + "DeliveryStream": "", + "Enabled": "" }, "AWS::MSK::Cluster Iam": { - "Enabled": "SASL/IAM authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::Cluster JmxExporter": { - "EnabledInBroker": "Indicates whether you want to enable or disable the JMX Exporter." + "EnabledInBroker": "" }, "AWS::MSK::Cluster LoggingInfo": { - "BrokerLogs": "You can configure your MSK cluster to send broker logs to different destination types. This configuration specifies the details of these destinations." + "BrokerLogs": "" }, "AWS::MSK::Cluster NodeExporter": { - "EnabledInBroker": "Indicates whether you want to enable or disable the Node Exporter." + "EnabledInBroker": "" }, "AWS::MSK::Cluster OpenMonitoring": { - "Prometheus": "Prometheus exporter settings." + "Prometheus": "" }, "AWS::MSK::Cluster Prometheus": { - "JmxExporter": "Indicates whether you want to enable or disable the JMX Exporter.", - "NodeExporter": "Indicates whether you want to enable or disable the Node Exporter." + "JmxExporter": "", + "NodeExporter": "" }, "AWS::MSK::Cluster ProvisionedThroughput": { - "Enabled": "Provisioned throughput is enabled or not.", - "VolumeThroughput": "Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second." + "Enabled": "", + "VolumeThroughput": "" }, "AWS::MSK::Cluster PublicAccess": { - "Type": "DISABLED means that public access is turned off. SERVICE_PROVIDED_EIPS means that public access is turned on." + "Type": "" }, "AWS::MSK::Cluster S3": { - "Bucket": "The name of the S3 bucket that is the destination for broker logs.", - "Enabled": "Specifies whether broker logs get sent to the specified Amazon S3 destination.", - "Prefix": "The S3 prefix that is the destination for broker logs." + "Bucket": "", + "Enabled": "", + "Prefix": "" }, "AWS::MSK::Cluster Sasl": { - "Iam": "Details for ClientAuthentication using IAM.", - "Scram": "Details for SASL/SCRAM client authentication." + "Iam": "", + "Scram": "" }, "AWS::MSK::Cluster Scram": { - "Enabled": "SASL/SCRAM authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::Cluster StorageInfo": { - "EBSStorageInfo": "EBS volume information." + "EBSStorageInfo": "" }, "AWS::MSK::Cluster Tls": { - "CertificateAuthorityArnList": "List of AWS Private CA Amazon Resource Name (ARN)s.", - "Enabled": "TLS authentication is enabled or not." 
+ "CertificateAuthorityArnList": "", + "Enabled": "" }, "AWS::MSK::Cluster Unauthenticated": { - "Enabled": "Unauthenticated is enabled or not." + "Enabled": "" }, "AWS::MSK::Cluster VpcConnectivity": { - "ClientAuthentication": "VPC connection control settings for brokers." + "ClientAuthentication": "" }, "AWS::MSK::Cluster VpcConnectivityClientAuthentication": { - "Sasl": "Details for VpcConnectivity ClientAuthentication using SASL.", - "Tls": "Details for VpcConnectivity ClientAuthentication using TLS." + "Sasl": "", + "Tls": "" }, "AWS::MSK::Cluster VpcConnectivityIam": { - "Enabled": "SASL/IAM authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::Cluster VpcConnectivitySasl": { - "Iam": "Details for ClientAuthentication using IAM for VpcConnectivity.", - "Scram": "Details for SASL/SCRAM client authentication for VpcConnectivity." + "Iam": "", + "Scram": "" }, "AWS::MSK::Cluster VpcConnectivityScram": { - "Enabled": "SASL/SCRAM authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::Cluster VpcConnectivityTls": { - "Enabled": "TLS authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::ClusterPolicy": { "ClusterArn": "The Amazon Resource Name (ARN) that uniquely identifies the cluster.", "Policy": "Resource policy for the cluster." }, "AWS::MSK::Configuration": { - "Description": "The description of the configuration.", + "Description": "", "KafkaVersionsList": "", - "LatestRevision": "Latest revision of the configuration.", - "Name": "The name of the configuration. Configuration names are strings that match the regex \"^[0-9A-Za-z][0-9A-Za-z-]{0,}$\".", - "ServerProperties": "Contents of the server.properties file. When using the API, you must ensure that the contents of the file are base64 encoded. When using the console, the SDK, or the CLI, the contents of server.properties can be in plaintext." + "LatestRevision": "", + "Name": "", + "ServerProperties": "" }, "AWS::MSK::Configuration LatestRevision": { "CreationTime": "", @@ -24066,67 +25046,71 @@ "Revision": "" }, "AWS::MSK::Replicator": { - "CurrentVersion": "", - "Description": "", - "KafkaClusters": "", - "ReplicationInfoList": "", - "ReplicatorName": "", - "ServiceExecutionRoleArn": "", - "Tags": "" + "CurrentVersion": "The current version number of the replicator.", + "Description": "A summary description of the replicator.", + "KafkaClusters": "Kafka Clusters to use in setting up sources / targets for replication.", + "ReplicationInfoList": "A list of replication configurations, where each configuration targets a given source cluster to target cluster replication flow.", + "ReplicatorName": "The name of the replicator. Alpha-numeric characters with '-' are allowed.", + "ServiceExecutionRoleArn": "The ARN of the IAM role used by the replicator to access resources in the customer's account (e.g source and target clusters)", + "Tags": "List of tags to attach to created Replicator." }, "AWS::MSK::Replicator AmazonMskCluster": { - "MskClusterArn": "" + "MskClusterArn": "The Amazon Resource Name (ARN) of an Amazon MSK cluster." 
}, "AWS::MSK::Replicator ConsumerGroupReplication": { - "ConsumerGroupsToExclude": "", - "ConsumerGroupsToReplicate": "", - "DetectAndCopyNewConsumerGroups": "", - "SynchroniseConsumerGroupOffsets": "" + "ConsumerGroupsToExclude": "List of regular expression patterns indicating the consumer groups that should not be replicated.", + "ConsumerGroupsToReplicate": "List of regular expression patterns indicating the consumer groups to copy.", + "DetectAndCopyNewConsumerGroups": "Enables synchronization of consumer groups to target cluster.", + "SynchroniseConsumerGroupOffsets": "Enables synchronization of consumer group offsets to target cluster. The translated offsets will be written to topic __consumer_offsets." }, "AWS::MSK::Replicator KafkaCluster": { - "AmazonMskCluster": "", - "VpcConfig": "" + "AmazonMskCluster": "Details of an Amazon MSK Cluster.", + "VpcConfig": "Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster." }, "AWS::MSK::Replicator KafkaClusterClientVpcConfig": { - "SecurityGroupIds": "", - "SubnetIds": "" + "SecurityGroupIds": "The security groups to attach to the ENIs for the broker nodes.", + "SubnetIds": "The list of subnets in the client VPC to connect to." }, "AWS::MSK::Replicator ReplicationInfo": { - "ConsumerGroupReplication": "", - "SourceKafkaClusterArn": "", - "TargetCompressionType": "", - "TargetKafkaClusterArn": "", - "TopicReplication": "" + "ConsumerGroupReplication": "Configuration relating to consumer group replication.", + "SourceKafkaClusterArn": "The ARN of the source Kafka cluster.", + "TargetCompressionType": "The compression type to use when producing records to target cluster.", + "TargetKafkaClusterArn": "The ARN of the target Kafka cluster.", + "TopicReplication": "Configuration relating to topic replication." }, "AWS::MSK::Replicator ReplicationStartingPosition": { - "Type": "" + "Type": "The type of replication starting position." + }, + "AWS::MSK::Replicator ReplicationTopicNameConfiguration": { + "Type": "The type of replication topic name configuration, identical to upstream topic name or prefixed with source cluster alias." }, "AWS::MSK::Replicator Tag": { "Key": "", "Value": "" }, "AWS::MSK::Replicator TopicReplication": { - "CopyAccessControlListsForTopics": "", - "CopyTopicConfigurations": "", - "DetectAndCopyNewTopics": "", - "StartingPosition": "", - "TopicsToExclude": "", - "TopicsToReplicate": "" + "CopyAccessControlListsForTopics": "Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.", + "CopyTopicConfigurations": "Whether to periodically configure remote topics to match their corresponding upstream topics.", + "DetectAndCopyNewTopics": "Whether to periodically check for new topics and partitions.", + "StartingPosition": "Specifies the position in the topics to start replicating from.", + "TopicNameConfiguration": "Configuration for specifying replicated topic names will be the same as their corresponding upstream topics or prefixed with source cluster alias.", + "TopicsToExclude": "List of regular expression patterns indicating the topics that should not be replicated.", + "TopicsToReplicate": "List of regular expression patterns indicating the topics to copy." 
}, "AWS::MSK::ServerlessCluster": { - "ClientAuthentication": "Includes all client authentication information.", + "ClientAuthentication": "", "ClusterName": "", "Tags": "", "VpcConfigs": "" }, "AWS::MSK::ServerlessCluster ClientAuthentication": { - "Sasl": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true." + "Sasl": "" }, "AWS::MSK::ServerlessCluster Iam": { - "Enabled": "SASL/IAM authentication is enabled or not." + "Enabled": "" }, "AWS::MSK::ServerlessCluster Sasl": { - "Iam": "Details for ClientAuthentication using IAM." + "Iam": "" }, "AWS::MSK::ServerlessCluster VpcConfig": { "SecurityGroups": "", @@ -24134,15 +25118,15 @@ }, "AWS::MSK::VpcConnection": { "Authentication": "The type of private link authentication.", - "ClientSubnets": "The list of subnets in the client VPC to connect to.", - "SecurityGroups": "The security groups to attach to the ENIs for the broker nodes.", - "Tags": "Create tags when creating the VPC connection.", - "TargetClusterArn": "The Amazon Resource Name (ARN) of the cluster.", - "VpcId": "The VPC id of the remote client." + "ClientSubnets": "", + "SecurityGroups": "", + "Tags": "", + "TargetClusterArn": "", + "VpcId": "" }, "AWS::MWAA::Environment": { "AirflowConfigurationOptions": "A list of key-value pairs containing the Airflow configuration options for your environment. For example, `core.default_timezone: utc` . To learn more, see [Apache Airflow configuration options](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html) .", - "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "AirflowVersion": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "DagS3Path": "The relative path to the DAGs folder on your Amazon S3 bucket. For example, `dags` . To learn more, see [Adding or updating DAGs](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-folder.html) .", "EndpointManagement": "Defines whether the VPC endpoints configured for the environment are created, and managed, by the customer or by Amazon MWAA. If set to `SERVICE` , Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to `CUSTOMER` , you must create, and manage, the VPC endpoints in your VPC.", "EnvironmentClass": "The environment class type. Valid values: `mw1.small` , `mw1.medium` , `mw1.large` . 
To learn more, see [Amazon MWAA environment class](https://docs.aws.amazon.com/mwaa/latest/userguide/environment-class.html) .", @@ -24391,6 +25375,7 @@ "Name": "The name of the flow.", "Source": "The settings for the source that you want to use for the new flow.", "SourceFailoverConfig": "The settings for source failover.", + "SourceMonitoringConfig": "", "VpcInterfaces": "The VPC interfaces that you added to this flow." }, "AWS::MediaConnect::Flow Encryption": { @@ -24477,6 +25462,9 @@ "VpcInterfaceName": "The name of the VPC interface that the source content comes from.", "WhitelistCidr": "The range of IP addresses that are allowed to contribute content to your source. Format the IP addresses as a Classless Inter-Domain Routing (CIDR) block; for example, 10.0.0.0/16." }, + "AWS::MediaConnect::Flow SourceMonitoringConfig": { + "ThumbnailState": "" + }, "AWS::MediaConnect::Flow SourcePriority": { "PrimarySource": "The name of the source you choose as the primary source for this flow." }, @@ -24521,6 +25509,7 @@ "MediaStreamOutputConfigurations": "The definition for each media stream that is associated with the output.", "MinLatency": "The minimum latency in milliseconds for SRT-based streams. In streams that use the SRT protocol, this value that you set on your MediaConnect source or output represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender\u2019s minimum latency and the receiver\u2019s minimum latency.", "Name": "The name of the output. This value must be unique within the current flow.", + "OutputStatus": "An indication of whether the new output should be enabled or disabled as soon as it is created. If you don't specify the outputStatus field in your request, MediaConnect sets it to ENABLED.", "Port": "The port to use when MediaConnect distributes content to the output.", "Protocol": "The protocol to use for the output.", "RemoteId": "The identifier that is assigned to the Zixi receiver. This parameter applies only to outputs that use Zixi pull.", @@ -25085,6 +26074,7 @@ "LookAheadRateControl": "The amount of lookahead. A value of low can decrease latency and memory usage, while high can produce better quality for certain content.", "MaxBitrate": "For QVBR: See the tooltip for Quality level. For VBR: Set the maximum bitrate in order to accommodate expected spikes in the complexity of the video.", "MinIInterval": "Meaningful only if sceneChangeDetect is set to enabled. This setting enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting the I-interval. The normal cadence resumes for the next GOP. Note that the maximum GOP stretch = GOP size + Min-I-interval - 1.", + "MinQp": "", "NumRefFrames": "The number of reference frames to use. The encoder might use more than requested if you use B-frames or interlaced encoding.", "ParControl": "Indicates how the output pixel aspect ratio is specified. If \"specified\" is selected, the output video pixel aspect ratio is determined by parNumerator and parDenominator. 
If \"initializeFromSource\" is selected, the output pixels aspect ratio will be set equal to the input video pixel aspect ratio of the first input.", "ParDenominator": "The Pixel Aspect Ratio denominator.", @@ -25134,6 +26124,7 @@ "LookAheadRateControl": "Amount of lookahead. A value of low can decrease latency and memory usage, while high can produce better quality for certain content.", "MaxBitrate": "For QVBR: See the tooltip for Quality level", "MinIInterval": "Only meaningful if sceneChangeDetect is set to enabled. Defaults to 5 if multiplex rate control is used. Enforces separation between repeated (cadence) I-frames and I-frames inserted by Scene Change Detection. If a scene change I-frame is within I-interval frames of a cadence I-frame, the GOP is shrunk and/or stretched to the scene change I-frame. GOP stretch requires enabling lookahead as well as setting I-interval. The normal cadence resumes for the next GOP. Note: Maximum GOP stretch = GOP size + Min-I-interval - 1", + "MinQp": "", "MvOverPictureBoundaries": "", "MvTemporalPredictor": "", "ParDenominator": "Pixel Aspect Ratio denominator.", @@ -25663,6 +26654,71 @@ "AWS::MediaLive::Channel WebvttDestinationSettings": { "StyleControl": "Controls whether the color and position of the source captions is passed through to the WebVTT output captions. PASSTHROUGH - Valid only if the source captions are EMBEDDED or TELETEXT. NO_STYLE_DATA - Don't pass through the style. The output captions will not contain any font styling information." }, + "AWS::MediaLive::ChannelPlacementGroup": { + "ClusterId": "", + "Name": "", + "Nodes": "", + "Tags": "" + }, + "AWS::MediaLive::ChannelPlacementGroup Tags": { + "Key": "", + "Value": "" + }, + "AWS::MediaLive::CloudWatchAlarmTemplate": { + "ComparisonOperator": "The comparison operator used to compare the specified statistic and the threshold.", + "DatapointsToAlarm": "The number of datapoints within the evaluation period that must be breaching to trigger the alarm.", + "Description": "A resource's optional description.", + "EvaluationPeriods": "The number of periods over which data is compared to the specified threshold.", + "GroupIdentifier": "A cloudwatch alarm template group's identifier. Can be either be its id or current name.", + "MetricName": "The name of the metric associated with the alarm. Must be compatible with targetResourceType.", + "Name": "A resource's name. Names must be unique within the scope of a resource type in a specific region.", + "Period": "The period, in seconds, over which the specified statistic is applied.", + "Statistic": "The statistic to apply to the alarm's metric data.", + "Tags": "", + "TargetResourceType": "The resource type this template should dynamically generate CloudWatch metric alarms for.", + "Threshold": "The threshold value to compare with the specified statistic.", + "TreatMissingData": "Specifies how missing data points are treated when evaluating the alarm's condition." + }, + "AWS::MediaLive::CloudWatchAlarmTemplateGroup": { + "Description": "A resource's optional description.", + "Name": "A resource's name. 
Names must be unique within the scope of a resource type in a specific region.", + "Tags": "" + }, + "AWS::MediaLive::Cluster": { + "ClusterType": "", + "InstanceRoleArn": "", + "Name": "", + "NetworkSettings": "", + "Tags": "" + }, + "AWS::MediaLive::Cluster ClusterNetworkSettings": { + "DefaultRoute": "", + "InterfaceMappings": "" + }, + "AWS::MediaLive::Cluster InterfaceMapping": { + "LogicalInterfaceName": "", + "NetworkId": "" + }, + "AWS::MediaLive::Cluster Tags": { + "Key": "", + "Value": "" + }, + "AWS::MediaLive::EventBridgeRuleTemplate": { + "Description": "A resource's optional description.", + "EventTargets": "The destinations that will receive the event notifications.", + "EventType": "The type of event to match with the rule.", + "GroupIdentifier": "An eventbridge rule template group's identifier. Can be either be its id or current name.", + "Name": "A resource's name. Names must be unique within the scope of a resource type in a specific region.", + "Tags": "" + }, + "AWS::MediaLive::EventBridgeRuleTemplate EventBridgeRuleTemplateTarget": { + "Arn": "Target ARNs must be either an SNS topic or CloudWatch log group." + }, + "AWS::MediaLive::EventBridgeRuleTemplateGroup": { + "Description": "A resource's optional description.", + "Name": "A resource's name. Names must be unique within the scope of a resource type in a specific region.", + "Tags": "" + }, "AWS::MediaLive::Input": { "Destinations": "Settings that apply only if the input is a push type of input.", "InputDevices": "Settings that apply only if the input is an Elemental Link input.", @@ -25671,6 +26727,7 @@ "Name": "A name for the input.", "RoleArn": "The IAM role for MediaLive to assume when creating a MediaConnect input or Amazon VPC input. This doesn't apply to other types of inputs. The role is identified by its ARN.", "Sources": "Settings that apply only if the input is a pull type of input.", + "SrtSettings": "", "Tags": "A collection of tags for this input. Each tag is a key-value pair.", "Type": "The type for this input.", "Vpc": "Settings that apply only if the input is an push input where the source is on Amazon VPC." @@ -25693,6 +26750,20 @@ "AWS::MediaLive::Input MediaConnectFlowRequest": { "FlowArn": "The ARN of one or two MediaConnect flows that are the sources for this MediaConnect input." }, + "AWS::MediaLive::Input SrtCallerDecryptionRequest": { + "Algorithm": "", + "PassphraseSecretArn": "" + }, + "AWS::MediaLive::Input SrtCallerSourceRequest": { + "Decryption": "", + "MinimumLatency": "", + "SrtListenerAddress": "", + "SrtListenerPort": "", + "StreamId": "" + }, + "AWS::MediaLive::Input SrtSettingsRequest": { + "SrtCallerSources": "" + }, "AWS::MediaLive::InputSecurityGroup": { "Tags": "A collection of tags for this input security group. Each tag is a key-value pair.", "WhitelistRules": "The list of IPv4 CIDR addresses to include in the input security group as \"allowed\" addresses." @@ -25724,7 +26795,6 @@ "Value": "" }, "AWS::MediaLive::Multiplexprogram": { - "ChannelId": "The unique ID of the channel.", "MultiplexId": "The unique id of the multiplex.", "MultiplexProgramSettings": "Multiplex Program settings configuration.", "PacketIdentifiersMap": "", @@ -25770,6 +26840,60 @@ "ConstantBitrate": "The constant bitrate configuration for the video encode.\nWhen this field is defined, StatmuxSettings must be undefined.", "StatmuxSettings": "Statmux rate control settings.\nWhen this field is defined, ConstantBitrate must be undefined." 
}, + "AWS::MediaLive::Network": { + "IpPools": "", + "Name": "", + "Routes": "", + "Tags": "" + }, + "AWS::MediaLive::Network IpPool": { + "Cidr": "" + }, + "AWS::MediaLive::Network Route": { + "Cidr": "", + "Gateway": "" + }, + "AWS::MediaLive::Network Tags": { + "Key": "", + "Value": "" + }, + "AWS::MediaLive::SdiSource": { + "Mode": "", + "Name": "", + "Tags": "", + "Type": "" + }, + "AWS::MediaLive::SdiSource Tags": { + "Key": "", + "Value": "" + }, + "AWS::MediaLive::SignalMap": { + "CloudWatchAlarmTemplateGroupIdentifiers": "A cloudwatch alarm template group's identifier. Can be either be its id or current name.", + "Description": "A resource's optional description.", + "DiscoveryEntryPointArn": "A top-level supported Amazon Web Services resource ARN to discover a signal map from.", + "EventBridgeRuleTemplateGroupIdentifiers": "An eventbridge rule template group's identifier. Can be either be its id or current name.", + "ForceRediscovery": "If true, will force a rediscovery of a signal map if an unchanged discoveryEntryPointArn is provided.", + "Name": "A resource's name. Names must be unique within the scope of a resource type in a specific region.", + "Tags": "" + }, + "AWS::MediaLive::SignalMap MediaResource": { + "Destinations": "A direct destination neighbor to an Amazon Web Services media resource.", + "Name": "The logical name of an Amazon Web Services media resource.", + "Sources": "A direct source neighbor to an Amazon Web Services media resource." + }, + "AWS::MediaLive::SignalMap MediaResourceNeighbor": { + "Arn": "The ARN of a resource used in Amazon Web Services media workflows.", + "Name": "The logical name of an Amazon Web Services media resource." + }, + "AWS::MediaLive::SignalMap MonitorDeployment": { + "DetailsUri": "URI associated with a signal map's monitor deployment.", + "ErrorMessage": "Error message associated with a failed monitor deployment of a signal map.", + "Status": "The signal map monitor deployment status." + }, + "AWS::MediaLive::SignalMap SuccessfulMonitorDeployment": { + "DetailsUri": "URI associated with a signal map's monitor deployment.", + "Status": "A signal map's monitor deployment status." + }, "AWS::MediaPackage::Asset": { "EgressEndpoints": "List of playback endpoints that are available for this asset.", "Id": "Unique identifier that you assign to the asset.", @@ -26039,6 +27163,7 @@ "ChannelGroupName": "The name of the channel group associated with the channel configuration.", "ChannelName": "The name of the channel.", "Description": "The description of the channel.", + "InputType": "", "Tags": "The tags associated with the channel." }, "AWS::MediaPackageV2::Channel IngestEndpoint": { @@ -26069,6 +27194,7 @@ "ContainerType": "The container type associated with the origin endpoint configuration.", "DashManifests": "A DASH manifest configuration.", "Description": "The description associated with the origin endpoint.", + "ForceEndpointErrorConfiguration": "", "HlsManifests": "The HLS manfiests associated with the origin endpoint configuration.", "LowLatencyHlsManifests": "The low-latency HLS (LL-HLS) manifests associated with the origin endpoint.", "OriginEndpointName": "The name of the origin endpoint associated with the origin endpoint configuration.", @@ -26113,6 +27239,9 @@ "Start": "Optionally specify the start time for all of your manifest egress requests. 
When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.", "TimeDelaySeconds": "Optionally specify the time delay for all of your manifest egress requests. Enter a value that is smaller than your endpoint's startover window. When you include time delay, note that you cannot use time delay query parameters for this manifest's endpoint URL." }, + "AWS::MediaPackageV2::OriginEndpoint ForceEndpointErrorConfiguration": { + "EndpointErrorConditions": "" + }, "AWS::MediaPackageV2::OriginEndpoint HlsManifestConfiguration": { "ChildManifestName": "The name of the child manifest associated with the HLS manifest configuration.", "FilterConfiguration": "", @@ -26445,7 +27574,7 @@ "EnableCloudwatchLogsExports": "Specifies a list of log types that are enabled for export to CloudWatch Logs.", "EngineVersion": "Indicates the database engine version.", "IamAuthEnabled": "True if mapping of Amazon Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.", - "KmsKeyId": "If `StorageEncrypted` is true, the Amazon KMS key identifier for the encrypted DB cluster.", + "KmsKeyId": "The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the database instances in the DB cluster, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the `StorageEncrypted` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the `StorageEncrypted` property to `true` .", "Port": "The port number on which the DB instances in the DB cluster accept connections.\n\nIf not specified, the default port used is `8182` .\n\n> This property will soon be deprecated. Please update existing templates to use the new `DBPort` property that has the same functionality.", "PreferredBackupWindow": "Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the `BackupRetentionPeriod` .\n\nAn update may require some interruption.", "PreferredMaintenanceWindow": "Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).", @@ -26454,7 +27583,7 @@ "ServerlessScalingConfiguration": "", "SnapshotIdentifier": "Specifies the identifier for a DB cluster snapshot. Must match the identifier of an existing snapshot.\n\nAfter you restore a DB cluster using a `SnapshotIdentifier` , you must specify the same `SnapshotIdentifier` for any future updates to the DB cluster. When you specify this property for an update, the DB cluster is not restored from the snapshot again, and the data in the database is not changed.\n\nHowever, if you don't specify the `SnapshotIdentifier` , an empty DB cluster is created, and the original DB cluster is deleted. 
If you specify a property that is different from the previous snapshot restore property, the DB cluster is restored from the snapshot specified by the `SnapshotIdentifier` , and the original DB cluster is deleted.", "SourceDBClusterIdentifier": "Creates a new DB cluster from a DB snapshot or DB cluster snapshot.\n\nIf a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.\n\nIf a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.", - "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `DBClusterIdentifier` , `DBSnapshotIdentifier` , or `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the cluster, snapshot, or source DB instance. If you specify the `KmsKeyId` property, you must enable encryption.\n\nIf you specify the `KmsKeyId` , you must enable encryption by setting `StorageEncrypted` to true.", + "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption and set this property to `true` .\n\nIf you enable the `StorageEncrypted` property but don't specify the `KmsKeyId` property, then the default KMS key is used. If you specify the `KmsKeyId` property, then that KMS key is used to encrypt the database instances in the DB cluster.\n\nIf you specify the `SourceDBClusterIdentifier` property, and don't specify this property or disable it, the value is inherited from the source DB cluster. If the source DB cluster is encrypted, the `KmsKeyId` property from the source cluster is used.\n\nIf you specify the `DBSnapshotIdentifier` and don't specify this property or disable it, the value is inherited from the snapshot and the specified `KmsKeyId` property from the snapshot is used.", "Tags": "The tags assigned to this cluster.", "UseLatestRestorableTime": "Creates a new DB cluster from a DB snapshot or DB cluster snapshot.\n\nIf a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.\n\nIf a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.", "VpcSecurityGroupIds": "Provides a list of VPC security groups that the DB cluster belongs to." @@ -26630,9 +27759,9 @@ "LoggingConfiguration": "Defines how AWS Network Firewall performs logging for a `Firewall` ." }, "AWS::NetworkFirewall::LoggingConfiguration LogDestinationConfig": { - "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", - "LogType": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs." + "LogType": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* ." }, "AWS::NetworkFirewall::LoggingConfiguration LoggingConfiguration": { "LogDestinationConfigs": "Defines the logging destinations for the logs for a firewall. Network Firewall generates logs for stateful rule groups." @@ -26789,7 +27918,9 @@ "AWS::NetworkManager::ConnectAttachment": { "CoreNetworkId": "The ID of the core network where the Connect attachment is located.", "EdgeLocation": "The Region where the edge is located.", + "NetworkFunctionGroupName": "The name of the network function group.", "Options": "Options for connecting an attachment.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", "Tags": "The tags associated with the Connect attachment.", "TransportAttachmentId": "The ID of the transport attachment." 
@@ -26797,6 +27928,11 @@ "AWS::NetworkManager::ConnectAttachment ConnectAttachmentOptions": { "Protocol": "The protocol used for the attachment connection." }, + "AWS::NetworkManager::ConnectAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::ConnectAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -26846,11 +27982,20 @@ "EdgeLocation": "The Region where a core network edge is located.", "InsideCidrBlocks": "The inside IP addresses used for core network edges." }, + "AWS::NetworkManager::CoreNetwork CoreNetworkNetworkFunctionGroup": { + "EdgeLocations": "The core network edge locations.", + "Name": "The name of the network function group.", + "Segments": "The segments associated with the network function group." + }, "AWS::NetworkManager::CoreNetwork CoreNetworkSegment": { "EdgeLocations": "The Regions where the edges are located.", "Name": "The name of a core network segment.", "SharedSegments": "The shared segments of a core network." }, + "AWS::NetworkManager::CoreNetwork Segments": { + "SendTo": "", + "SendVia": "" + }, "AWS::NetworkManager::CoreNetwork Tag": { "Key": "The tag key.\n\nConstraints: Maximum length of 128 characters.", "Value": "The tag value.\n\nConstraints: Maximum length of 256 characters." @@ -26935,10 +28080,17 @@ }, "AWS::NetworkManager::SiteToSiteVpnAttachment": { "CoreNetworkId": "", + "NetworkFunctionGroupName": "The name of the network function group.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", "Tags": "The tags associated with the Site-to-Site VPN attachment.", "VpnConnectionArn": "The ARN of the site-to-site VPN attachment." }, + "AWS::NetworkManager::SiteToSiteVpnAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::SiteToSiteVpnAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -26962,11 +28114,18 @@ "TransitGatewayArn": "The Amazon Resource Name (ARN) of the transit gateway." }, "AWS::NetworkManager::TransitGatewayRouteTableAttachment": { + "NetworkFunctionGroupName": "The name of the network function group.", "PeeringId": "The ID of the transit gateway peering.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "This property is read-only. 
Values can't be assigned to it.", "Tags": "The list of key-value pairs associated with the transit gateway route table attachment.", "TransitGatewayRouteTableArn": "The ARN of the transit gateway attachment route table. For example, `\"TransitGatewayRouteTableArn\": \"arn:aws:ec2:us-west-2:123456789012:transit-gateway-route-table/tgw-rtb-9876543210123456\"` ." }, + "AWS::NetworkManager::TransitGatewayRouteTableAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::TransitGatewayRouteTableAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -26979,11 +28138,17 @@ "AWS::NetworkManager::VpcAttachment": { "CoreNetworkId": "The core network ID.", "Options": "Options for creating the VPC attachment.", + "ProposedNetworkFunctionGroupChange": "Describes proposed changes to a network function group.", "ProposedSegmentChange": "Describes a proposed segment change. In some cases, the segment change must first be evaluated and accepted.", "SubnetArns": "The subnet ARNs.", "Tags": "The tags associated with the VPC attachment.", "VpcArn": "The ARN of the VPC attachment." }, + "AWS::NetworkManager::VpcAttachment ProposedNetworkFunctionGroupChange": { + "AttachmentPolicyRuleNumber": "The proposed new attachment policy rule number for the network function group.", + "NetworkFunctionGroupName": "The proposed name change for the network function group name.", + "Tags": "The list of proposed changes to the key-value tags associated with the network function group." + }, "AWS::NetworkManager::VpcAttachment ProposedSegmentChange": { "AttachmentPolicyRuleNumber": "The rule number in the policy document that applies to this change.", "SegmentName": "The name of the segment to change.", @@ -27140,6 +28305,10 @@ "Key": "The tag key. Tag keys must be unique for the pipeline to which they are attached.", "Value": "The value assigned to the corresponding tag key. Tag values can be null and don't have to be unique in a tag set. For example, you can have a key value pair in a tag set of `project : Trinity` and `cost-center : Trinity`" }, + "AWS::OSIS::Pipeline VpcAttachmentOptions": { + "AttachToVpc": "Whether a VPC is attached to the pipeline.", + "CidrBlock": "The CIDR block to be reserved for OpenSearch Ingestion to create elastic network interfaces (ENIs)." + }, "AWS::OSIS::Pipeline VpcEndpoint": { "VpcEndpointId": "The unique identifier of the endpoint.", "VpcId": "The ID for your VPC. AWS PrivateLink generates this value when you create a VPC.", @@ -27148,6 +28317,7 @@ "AWS::OSIS::Pipeline VpcOptions": { "SecurityGroupIds": "A list of security groups associated with the VPC endpoint.", "SubnetIds": "A list of subnet IDs associated with the VPC endpoint.", + "VpcAttachmentOptions": "Options for attaching a VPC to a pipeline.", "VpcEndpointManagement": "Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline." 
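The new `VpcAttachmentOptions` sits inside `VpcOptions`. A minimal sketch; the top-level pipeline properties (`PipelineName`, `MinUnits`, `MaxUnits`, `PipelineConfigurationBody`) are assumptions not documented in the entries above:

```yaml
# Hypothetical AWS::OSIS::Pipeline sketch showing the new VpcAttachmentOptions.
Resources:
  ExamplePipeline:
    Type: AWS::OSIS::Pipeline
    Properties:
      PipelineName: example-pipeline        # assumed top-level property
      MinUnits: 1                           # assumed top-level property
      MaxUnits: 4                           # assumed top-level property
      PipelineConfigurationBody: |          # assumed top-level property; config body trimmed
        version: "2"
      VpcOptions:
        SubnetIds: [subnet-0123456789abcdef0]
        SecurityGroupIds: [sg-0123456789abcdef0]
        VpcEndpointManagement: CUSTOMER     # or SERVICE, per the entry above
        VpcAttachmentOptions:
          AttachToVpc: true
          CidrBlock: 10.0.8.0/24            # reserved for OpenSearch Ingestion ENIs
```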
}, "AWS::Oam::Link": { @@ -27313,6 +28483,7 @@ "LogPublishingOptions": "An object with one or more of the following keys: `SEARCH_SLOW_LOGS` , `ES_APPLICATION_LOGS` , `INDEX_SLOW_LOGS` , `AUDIT_LOGS` , depending on the types of logs you want to publish. Each key needs a valid `LogPublishingOption` value. For the full syntax, see the [examples](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-opensearchservice-domain.html#aws-resource-opensearchservice-domain--examples) .", "NodeToNodeEncryptionOptions": "Specifies whether node-to-node encryption is enabled. See [Node-to-node encryption for Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/ntn.html) .", "OffPeakWindowOptions": "Options for a domain's off-peak window, during which OpenSearch Service can perform mandatory configuration changes on the domain.", + "SkipShardMigrationWait": "", "SnapshotOptions": "*DEPRECATED* . The automated snapshot configuration for the OpenSearch Service domain indexes.", "SoftwareUpdateOptions": "Service software update options for the domain.", "Tags": "An arbitrary set of tags (key\u2013value pairs) to associate with the OpenSearch Service domain.", @@ -27323,6 +28494,7 @@ "AnonymousAuthEnabled": "True to enable a 30-day migration period during which administrators can create role mappings. Only necessary when [enabling fine-grained access control on an existing domain](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html#fgac-enabling-existing) .", "Enabled": "True to enable fine-grained access control. You must also enable encryption of data at rest and node-to-node encryption. See [Fine-grained access control in Amazon OpenSearch Service](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/fgac.html) .", "InternalUserDatabaseEnabled": "True to enable the internal user database.", + "JWTOptions": "Container for information about the JWT configuration of the Amazon OpenSearch Service.", "MasterUserOptions": "Specifies information about the master user.", "SAMLOptions": "Container for information about the SAML configuration for OpenSearch Dashboards." }, @@ -27371,6 +28543,12 @@ "EntityId": "The unique entity ID of the application in the SAML identity provider.", "MetadataContent": "The metadata of the SAML application, in XML format." }, + "AWS::OpenSearchService::Domain JWTOptions": { + "Enabled": "", + "PublicKey": "", + "RolesKey": "", + "SubjectKey": "" + }, "AWS::OpenSearchService::Domain LogPublishingOption": { "CloudWatchLogsLogGroupArn": "Specifies the CloudWatch log group to publish to. Required if you enable log publishing.", "Enabled": "If `true` , enables the publishing of logs to CloudWatch.\n\nDefault: `false` ." @@ -27700,7 +28878,7 @@ "Value": "The string value that's associated with the key of the tag. You can set the value of a tag to an empty string, but you can't set the value of a tag to null." }, "AWS::Organizations::Policy": { - "Content": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- AI services opt-out policies: 2,500 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "Content": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "Description": "Human readable description of the policy.", "Name": "Name of the policy.\n\nThe [regex pattern](https://docs.aws.amazon.com/http://wikipedia.org/wiki/regex) that is used to validate this parameter is a string of any of the characters in the ASCII character range.", "Tags": "A list of tags that you want to attach to the newly created policy. For each tag in the list, you must specify both a tag key and a value. You can set the value to an empty string, but you can't set it to `null` . For more information about tagging, see [Tagging AWS Organizations resources](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) in the AWS Organizations User Guide.\n\n> If any one of the tags is not valid or if you exceed the allowed number of tags for a policy, then the entire request fails and the policy is not created.", @@ -27945,6 +29123,27 @@ "AutoEnroll": "Allow or deny an Active Directory group from autoenrolling certificates issued against a template. The Active Directory group must be allowed to enroll to allow autoenrollment", "Enroll": "Allow or deny an Active Directory group from enrolling certificates issued against a template." }, + "AWS::PCAConnectorSCEP::Challenge": { + "ConnectorArn": "The Amazon Resource Name (ARN) of the connector.", + "Tags": "" + }, + "AWS::PCAConnectorSCEP::Connector": { + "CertificateAuthorityArn": "The Amazon Resource Name (ARN) of the certificate authority associated with the connector.", + "MobileDeviceManagement": "Contains settings relevant to the mobile device management system that you chose for the connector. If you didn't configure `MobileDeviceManagement` , then the connector is for general-purpose use and this object is empty.", + "Tags": "" + }, + "AWS::PCAConnectorSCEP::Connector IntuneConfiguration": { + "AzureApplicationId": "The directory (tenant) ID from your Microsoft Entra ID app registration.", + "Domain": "The primary domain from your Microsoft Entra ID app registration." 
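The new PCAConnectorSCEP entries compose as follows; a minimal sketch of a connector configured for Microsoft Intune (the ARN and identifiers are placeholders, and the `MobileDeviceManagement` wrapper is documented just below):

```yaml
# Hypothetical AWS::PCAConnectorSCEP::Connector sketch for Microsoft Intune MDM.
Resources:
  ExampleScepConnector:
    Type: AWS::PCAConnectorSCEP::Connector
    Properties:
      CertificateAuthorityArn: arn:aws:acm-pca:us-east-1:111122223333:certificate-authority/11111111-2222-3333-4444-555555555555
      MobileDeviceManagement:
        Intune:                                                    # IntuneConfiguration
          AzureApplicationId: 11111111-2222-3333-4444-555555555555 # directory (tenant) ID
          Domain: example.onmicrosoft.com
```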
+ }, + "AWS::PCAConnectorSCEP::Connector MobileDeviceManagement": { + "Intune": "Configuration settings for use with Microsoft Intune. For information about using Connector for SCEP for Microsoft Intune, see [Using Connector for SCEP for Microsoft Intune](https://docs.aws.amazon.com/privateca/latest/userguide/scep-connector.htmlconnector-for-scep-intune.html) ." + }, + "AWS::PCAConnectorSCEP::Connector OpenIdConfiguration": { + "Audience": "The audience value to copy into your Microsoft Entra app registration's OIDC.", + "Issuer": "The issuer value to copy into your Microsoft Entra app registration's OIDC.", + "Subject": "The subject value to copy into your Microsoft Entra app registration's OIDC." + }, "AWS::Panorama::ApplicationInstance": { "ApplicationInstanceIdToReplace": "The ID of an application instance to replace with the new instance.", "DefaultRuntimeContextDevice": "The device's ID.", @@ -28618,6 +29817,7 @@ "DesiredState": "The state the pipe should be in.", "Enrichment": "The ARN of the enrichment resource.", "EnrichmentParameters": "The parameters required to set up enrichment on your pipe.", + "KmsKeyIdentifier": "The identifier of the AWS KMS customer managed key for EventBridge to use, if you choose to use a customer managed key to encrypt pipe data. The identifier can be the key Amazon Resource Name (ARN), KeyId, key alias, or key alias ARN.\n\nTo update a pipe that is using the default AWS owned key to use a customer managed key instead, or update a pipe that is using a customer managed key to use a different customer managed key, specify a customer managed key identifier.\n\nTo update a pipe that is using a customer managed key to use the default AWS owned key , specify an empty string.\n\nFor more information, see [Managing keys](https://docs.aws.amazon.com/kms/latest/developerguide/getting-started.html) in the *AWS Key Management Service Developer Guide* .", "LogConfiguration": "The logging configuration settings for the pipe.", "Name": "The name of the pipe.", "RoleArn": "The ARN of the role that allows the pipe to send data to the target.", @@ -28764,21 +29964,21 @@ "BatchSize": "The maximum number of records to include in each batch.", "DeadLetterConfig": "Define the target queue to send dead-letter queue events to.", "MaximumBatchingWindowInSeconds": "The maximum length of a time to wait for events.", - "MaximumRecordAgeInSeconds": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", - "MaximumRetryAttempts": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", - "OnPartialBatchItemFailure": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", - "ParallelizationFactor": "(Streams only) The number of batches to process concurrently from each shard. The default value is 1.", + "MaximumRecordAgeInSeconds": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. 
When the value is set to infinite, EventBridge never discards old records.", + "MaximumRetryAttempts": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "OnPartialBatchItemFailure": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", + "ParallelizationFactor": "The number of batches to process concurrently from each shard. The default value is 1.", "StartingPosition": "(Streams only) The position in a stream from which to start reading.\n\n*Valid values* : `TRIM_HORIZON | LATEST`" }, "AWS::Pipes::Pipe PipeSourceKinesisStreamParameters": { "BatchSize": "The maximum number of records to include in each batch.", "DeadLetterConfig": "Define the target queue to send dead-letter queue events to.", "MaximumBatchingWindowInSeconds": "The maximum length of a time to wait for events.", - "MaximumRecordAgeInSeconds": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", - "MaximumRetryAttempts": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", - "OnPartialBatchItemFailure": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", - "ParallelizationFactor": "(Streams only) The number of batches to process concurrently from each shard. The default value is 1.", - "StartingPosition": "(Streams only) The position in a stream from which to start reading.", + "MaximumRecordAgeInSeconds": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", + "MaximumRetryAttempts": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "OnPartialBatchItemFailure": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", + "ParallelizationFactor": "The number of batches to process concurrently from each shard. The default value is 1.", + "StartingPosition": "The position in a stream from which to start reading.", "StartingPositionTimestamp": "With `StartingPosition` set to `AT_TIMESTAMP` , the time from which to start reading, in Unix time seconds."
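With the "(Streams only)" qualifiers dropped, the Kinesis source parameters above read as general pipe settings. A minimal sketch; the top-level `Source`/`Target` properties and the `DeadLetterConfig` shape are assumptions beyond the entries in this diff:

```yaml
# Hypothetical AWS::Pipes::Pipe sketch using the Kinesis source parameters above.
Resources:
  ExamplePipe:
    Type: AWS::Pipes::Pipe
    Properties:
      Name: example-pipe
      RoleArn: arn:aws:iam::111122223333:role/example-pipe-role
      Source: arn:aws:kinesis:us-east-1:111122223333:stream/example-stream  # assumed top-level property
      Target: arn:aws:sqs:us-east-1:111122223333:example-queue              # assumed top-level property
      SourceParameters:
        KinesisStreamParameters:
          BatchSize: 100
          MaximumBatchingWindowInSeconds: 5
          StartingPosition: LATEST
          MaximumRetryAttempts: 3
          MaximumRecordAgeInSeconds: -1               # -1 = infinite, per the entry above
          OnPartialBatchItemFailure: AUTOMATIC_BISECT
          ParallelizationFactor: 1
          DeadLetterConfig:
            Arn: arn:aws:sqs:us-east-1:111122223333:example-dlq  # assumed shape
```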
}, "AWS::Pipes::Pipe PipeSourceManagedStreamingKafkaParameters": { @@ -28786,7 +29986,7 @@ "ConsumerGroupID": "The name of the destination queue to consume.", "Credentials": "The credentials needed to access the resource.", "MaximumBatchingWindowInSeconds": "The maximum length of a time to wait for events.", - "StartingPosition": "(Streams only) The position in a stream from which to start reading.", + "StartingPosition": "The position in a stream from which to start reading.", "TopicName": "The name of the topic that the pipe will read from." }, "AWS::Pipes::Pipe PipeSourceParameters": { @@ -28813,7 +30013,7 @@ "Credentials": "The credentials needed to access the resource.", "MaximumBatchingWindowInSeconds": "The maximum length of a time to wait for events.", "ServerRootCaCertificate": "The ARN of the Secrets Manager secret used for certification.", - "StartingPosition": "(Streams only) The position in a stream from which to start reading.", + "StartingPosition": "The position in a stream from which to start reading.", "TopicName": "The name of the topic that the pipe will read from.", "Vpc": "This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used." }, @@ -28909,7 +30109,7 @@ "SingleMeasureMappings": "Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.", "TimeFieldType": "The type of time value used.\n\nThe default is `EPOCH` .", "TimeValue": "Dynamic path to the source data field that represents the time value for your data.", - "TimestampFormat": "How to format the timestamps. For example, `YYYY-MM-DDThh:mm:ss.sssTZD` .\n\nRequired if `TimeFieldType` is specified as `TIMESTAMP_FORMAT` .", + "TimestampFormat": "How to format the timestamps. For example, `yyyy-MM-dd'T'HH:mm:ss'Z'` .\n\nRequired if `TimeFieldType` is specified as `TIMESTAMP_FORMAT` .", "VersionValue": "64 bit version value or source data field that represents the version value for your data.\n\nWrite requests with a higher version number will update the existing measure values of the record and version. In cases where the measure value is the same, the version will still be updated.\n\nDefault value is 1.\n\nTimestream for LiveAnalytics does not support updating partial measure values in a record.\n\nWrite requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, `Version` will still be updated. Default value is `1` .\n\n> `Version` must be `1` or greater, or you will receive a `ValidationException` error." 
}, "AWS::Pipes::Pipe PlacementConstraint": { @@ -28923,7 +30123,7 @@ "AWS::Pipes::Pipe S3LogDestination": { "BucketName": "The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.", "BucketOwner": "The AWS account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.", - "OutputFormat": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "OutputFormat": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "Prefix": "The prefix text with which to begin Amazon S3 log object names.\n\nFor more information, see [Organizing objects using prefixes](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::Pipes::Pipe SageMakerPipelineParameter": { @@ -28937,7 +30137,7 @@ "SaslScram512Auth": "The ARN of the Secrets Manager secret." }, "AWS::Pipes::Pipe SelfManagedKafkaAccessConfigurationVpc": { - "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "SecurityGroup": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "Subnets": "Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets." }, "AWS::Pipes::Pipe SingleMeasureMapping": { @@ -28988,20 +30188,32 @@ }, "AWS::QBusiness::Application": { "AttachmentsConfiguration": "Configuration information for the file upload during chat feature.", + "AutoSubscriptionConfiguration": "Subscription configuration information for an Amazon Q Business application using IAM identity federation for user management.", + "ClientIdsForOIDC": "", "Description": "A description for the Amazon Q Business application.", "DisplayName": "The name of the Amazon Q Business application.", "EncryptionConfiguration": "Provides the identifier of the AWS KMS key used to encrypt data indexed by Amazon Q Business. Amazon Q Business doesn't support asymmetric keys.", + "IamIdentityProviderArn": "The Amazon Resource Name (ARN) of an identity provider being used by an Amazon Q Business application.", "IdentityCenterInstanceArn": "The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for\u2014or connecting to\u2014your Amazon Q Business application.\n\n*Required* : `Yes`", - "QAppsConfiguration": "Configuration information about Amazon Q Apps. (preview feature)", - "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics.", + "IdentityType": "The authentication type being used by a Amazon Q Business application.", + "PersonalizationConfiguration": "Configuration information about chat response personalization. 
For more information, see [Personalizing chat responses](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/personalizing-chat-responses.html) .", + "QAppsConfiguration": "Configuration information about Amazon Q Apps.", + "RoleArn": "The Amazon Resource Name (ARN) of an IAM role with permissions to access your Amazon CloudWatch logs and metrics. If this property is not specified, Amazon Q Business will create a [service linked role (SLR)](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/using-service-linked-roles.html#slr-permissions) and use it as the application's role.", "Tags": "A list of key-value pairs that identify or categorize your Amazon Q Business application. You can also use tags to help control access to the application. Tag keys and values can consist of Unicode letters, digits, white space, and any of the following symbols: _ . : / = + - @." }, "AWS::QBusiness::Application AttachmentsConfiguration": { "AttachmentsControlMode": "Status information about whether file upload functionality is activated or deactivated for your end user." }, + "AWS::QBusiness::Application AutoSubscriptionConfiguration": { + "AutoSubscribe": "Describes whether automatic subscriptions are enabled for an Amazon Q Business application using IAM identity federation for user management.", + "DefaultSubscriptionType": "Describes the default subscription type assigned to an Amazon Q Business application using IAM identity federation for user management. If the value for `autoSubscribe` is set to `ENABLED` you must select a value for this field." + }, "AWS::QBusiness::Application EncryptionConfiguration": { "KmsKeyId": "The identifier of the AWS KMS key. Amazon Q Business doesn't support asymmetric keys." }, + "AWS::QBusiness::Application PersonalizationConfiguration": { + "PersonalizationControlMode": "An option to allow Amazon Q Business to customize chat responses using user specific metadata\u2014specifically, location and job information\u2014in your IAM Identity Center instance." + }, "AWS::QBusiness::Application QAppsConfiguration": { "QAppsControlMode": "Status information about whether end users can create and use Amazon Q Apps in the web experience." }, @@ -29011,7 +30223,7 @@ }, "AWS::QBusiness::DataSource": { "ApplicationId": "The identifier of the Amazon Q Business application the data source will be attached to.", - "Configuration": "Configuration information to connect your data source repository to Amazon Q Business. Use this parameter to provide a JSON schema with configuration information specific to your data source connector.\n\nEach data source has a JSON schema provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler connectors require the following JSON schemas:\n\n- [Amazon S3 JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/s3-api.html)\n- [Web Crawler JSON schema](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/web-crawler-api.html)\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source of your choice.\n- Then, from your specific data source connector page, select *Using the API* . 
You will find the JSON schema for your data source, including parameter descriptions, in this section.", + "Configuration": "Use this property to specify a JSON or YAML schema with configuration information specific to your data source connector to connect your data source repository to Amazon Q Business . You must use the JSON or YAML schema provided by Amazon Q .\n\nYou can find configuration templates for your specific data source using the following steps:\n\n- Navigate to the [Supported connectors](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/connectors-list.html) page in the Amazon Q Business User Guide, and select the data source connector of your choice.\n- Then, from that specific data source connector's page, select *Using AWS CloudFormation* to find the schemas for your data source connector, including parameter descriptions and examples.", "Description": "A description for the data source connector.", "DisplayName": "The name of the Amazon Q Business data source.", "DocumentEnrichmentConfiguration": "Provides the configuration information for altering document metadata and content during the document ingestion process.\n\nFor more information, see [Custom document enrichment](https://docs.aws.amazon.com/amazonq/latest/business-use-dg/custom-document-enrichment.html) .", @@ -29153,6 +30365,7 @@ }, "AWS::QBusiness::WebExperience": { "ApplicationId": "The identifier of the Amazon Q Business web experience.", + "IdentityProviderConfiguration": "Provides information about the identity provider (IdP) used to authenticate end users of an Amazon Q Business web experience.", "RoleArn": "The Amazon Resource Name (ARN) of the service role attached to your web experience.\n\n> You must provide this value if you're using IAM Identity Center to manage end user access to your application. If you're using legacy identity management to manage user access, you don't need to provide this value.", "SamplePromptsControlMode": "Determines whether sample prompts are enabled in the web experience for an end user.", "Subtitle": "A subtitle to personalize your Amazon Q Business web experience.", @@ -29160,6 +30373,17 @@ "Title": "The title for your Amazon Q Business web experience.", "WelcomeMessage": "A message in an Amazon Q Business web experience." }, + "AWS::QBusiness::WebExperience IdentityProviderConfiguration": { + "OpenIDConnectConfiguration": "", + "SamlConfiguration": "" + }, + "AWS::QBusiness::WebExperience OpenIDConnectProviderConfiguration": { + "SecretsArn": "The Amazon Resource Name (ARN) of a Secrets Manager secret containing the OIDC client secret.", + "SecretsRole": "An IAM role with permissions to access AWS KMS to decrypt the Secrets Manager secret containing your OIDC client secret." + }, + "AWS::QBusiness::WebExperience SamlProviderConfiguration": { + "AuthenticationUrl": "The URL where Amazon Q Business end users will be redirected for authentication." + }, "AWS::QBusiness::WebExperience Tag": { "Key": "The key for the tag. Keys are not case sensitive and must be unique for the Amazon Q Business application or data source.", "Value": "The value associated with the tag. The value may be an empty string but it can't be null." 
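The new `IdentityProviderConfiguration` on the web experience holds either the OIDC or the SAML provider shape documented above. A minimal OIDC sketch (identifiers and ARNs are placeholders):

```yaml
# Hypothetical AWS::QBusiness::WebExperience sketch with an OIDC identity provider.
Resources:
  ExampleWebExperience:
    Type: AWS::QBusiness::WebExperience
    Properties:
      ApplicationId: a1b2c3d4-5678-90ab-cdef-EXAMPLE11111
      Title: Example web experience
      IdentityProviderConfiguration:
        OpenIDConnectConfiguration:          # OpenIDConnectProviderConfiguration
          SecretsArn: arn:aws:secretsmanager:us-east-1:111122223333:secret:oidc-client-secret-AbCdEf
          SecretsRole: arn:aws:iam::111122223333:role/example-secret-decrypt-role
```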
@@ -29229,6 +30453,7 @@ "FilterGroups": "Filter definitions for an analysis.\n\nFor more information, see [Filtering Data in Amazon QuickSight](https://docs.aws.amazon.com/quicksight/latest/user/adding-a-filter.html) in the *Amazon QuickSight User Guide* .", "Options": "An array of option definitions for an analysis.", "ParameterDeclarations": "An array of parameter declarations for an analysis.\n\nParameters are named variables that can transfer a value for use by an action or an object.\n\nFor more information, see [Parameters in Amazon QuickSight](https://docs.aws.amazon.com/quicksight/latest/user/parameters-in-quicksight.html) in the *Amazon QuickSight User Guide* .", + "QueryExecutionOptions": "", "Sheets": "An array of sheet definitions for an analysis. Each `SheetDefinition` provides detailed information about a sheet within this analysis." }, "AWS::QuickSight::Analysis AnalysisError": { @@ -29456,6 +30681,11 @@ "CustomFilterListConfiguration": "A list of custom filter values. In the Amazon QuickSight console, this filter type is called a custom filter list.", "FilterListConfiguration": "A list of filter configurations. In the Amazon QuickSight console, this filter type is called a filter list." }, + "AWS::QuickSight::Analysis CategoryInnerFilter": { + "Column": "", + "Configuration": "", + "DefaultFilterControlConfiguration": "" + }, "AWS::QuickSight::Analysis ChartAxisLabelOptions": { "AxisLabelOptions": "The label options for a chart axis.", "SortIconVisibility": "The visibility configuration of the sort icon on a chart's axis label.", @@ -29499,6 +30729,7 @@ "Aggregation": "The aggregation function of the column tooltip item.", "Column": "The target column of the tooltip item.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the column tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." }, "AWS::QuickSight::Analysis ComboChartAggregatedFieldWells": { @@ -29521,6 +30752,7 @@ "ReferenceLines": "The reference line setup of the visual.", "SecondaryYAxisDisplayOptions": "The label display options (grid line, range, scale, axis step) of a combo chart's secondary y-axis (line) field well.", "SecondaryYAxisLabelOptions": "The label options (label text, label visibility, and sort icon visibility) of a combo chart's secondary y-axis(line) field well.", + "SingleAxisOptions": "", "SortConfiguration": "The sort configuration of a `ComboChartVisual` .", "Tooltip": "The legend display setup of the visual.", "VisualPalette": "The palette (chart color) display setup of the visual." @@ -29804,6 +31036,7 @@ "ValueWhenUnsetOption": "The built-in options for default values. The value can be one of the following:\n\n- `RECOMMENDED` : The recommended value.\n- `NULL` : The `NULL` value." }, "AWS::QuickSight::Analysis DefaultDateTimePickerControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "Type": "The date time picker type of the `DefaultDateTimePickerControlOptions` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." }, @@ -29821,6 +31054,7 @@ "DefaultTextFieldOptions": "The default options that correspond to the `TextField` filter control type." 
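The new Definition-level `QueryExecutionOptions` slot (added above) would sit alongside `FilterGroups` and `Sheets` in an analysis definition. A minimal fragment; the `AUTO`/`MANUAL` mode values are assumptions:

```yaml
# Hypothetical fragment of an AWS::QuickSight::Analysis Definition.
Definition:
  DataSetIdentifierDeclarations:
    - Identifier: example-dataset
      DataSetArn: arn:aws:quicksight:us-east-1:111122223333:dataset/example
  QueryExecutionOptions:
    QueryExecutionMode: AUTO   # assumed enum value; MANUAL would defer query execution
  Sheets: []
```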
}, "AWS::QuickSight::Analysis DefaultFilterDropDownControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of a control.", "SelectableValues": "A list of selectable values that are used in a control.", "Type": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu." @@ -29849,6 +31083,7 @@ "SectionBased": "The options that determine the default settings for a section-based layout configuration." }, "AWS::QuickSight::Analysis DefaultRelativeDateTimeControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `RelativeDateTimeControl` .", "DisplayOptions": "The display options of a control." }, "AWS::QuickSight::Analysis DefaultSectionBasedLayoutConfiguration": { @@ -29945,6 +31180,7 @@ "AWS::QuickSight::Analysis FieldTooltipItem": { "FieldId": "The unique ID of the field that is targeted by the tooltip.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the field tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." }, "AWS::QuickSight::Analysis FilledMapAggregatedFieldWells": { @@ -29986,6 +31222,7 @@ }, "AWS::QuickSight::Analysis Filter": { "CategoryFilter": "A `CategoryFilter` filters text values.\n\nFor more information, see [Adding text filters](https://docs.aws.amazon.com/quicksight/latest/user/add-a-text-filter-data-prep.html) in the *Amazon QuickSight User Guide* .", + "NestedFilter": "A `NestedFilter` filters data with a subset of data that is defined by the nested inner filter.", "NumericEqualityFilter": "A `NumericEqualityFilter` filters numeric values that equal or do not equal a given numeric value.", "NumericRangeFilter": "A `NumericRangeFilter` filters numeric values that are either inside or outside a given numeric range.", "RelativeDatesFilter": "A `RelativeDatesFilter` filters date values that are relative to a given date.", @@ -30009,6 +31246,7 @@ "SourceFilterId": "The source filter ID of the `FilterCrossSheetControl` ." }, "AWS::QuickSight::Analysis FilterDateTimePickerControl": { + "CommitMode": "The visibility configurationof the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", @@ -30017,6 +31255,7 @@ }, "AWS::QuickSight::Analysis FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of the `FilterDropDownControl` .", "FilterControlId": "The ID of the `FilterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -30055,6 +31294,7 @@ "SameSheetTargetVisualConfiguration": "The configuration of the same-sheet target visuals that you want to be filtered." 
}, "AWS::QuickSight::Analysis FilterRelativeDateTimeControl": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterRelativeDateTimeControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterTextAreaControl` .", "SourceFilterId": "The source filter ID of the `FilterTextAreaControl` .", @@ -30397,6 +31637,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have visuals with the same identifiers." }, + "AWS::QuickSight::Analysis InnerFilter": { + "CategoryInnerFilter": "A `CategoryInnerFilter` filters text values for the `NestedFilter` ." + }, "AWS::QuickSight::Analysis InsightConfiguration": { "Computations": "The computations configurations of the insight visual", "CustomNarrative": "The custom narrative of the insight visual." @@ -30540,6 +31783,7 @@ "SecondaryYAxisDisplayOptions": "The series axis configuration of a line chart.", "SecondaryYAxisLabelOptions": "The options that determine the presentation of the secondary y-axis label.", "Series": "The series item configuration of a line chart.", + "SingleAxisOptions": "", "SmallMultiplesOptions": "The small multiples setup for the visual.", "SortConfiguration": "The sort configuration of a line chart.", "Tooltip": "The tooltip configuration of a line chart.", @@ -30649,6 +31893,12 @@ "AWS::QuickSight::Analysis NegativeValueConfiguration": { "DisplayMode": "Determines the display mode of the negative value configuration." }, + "AWS::QuickSight::Analysis NestedFilter": { + "Column": "The column that the filter is applied to.", + "FilterId": "An identifier that uniquely identifies a filter within a dashboard, analysis, or template.", + "IncludeInnerSet": "A boolean condition to include or exclude the subset that is defined by the values of the nested inner filter.", + "InnerFilter": "The `InnerFilter` defines the subset of data to be used with the `NestedFilter` ." + }, "AWS::QuickSight::Analysis NullValueFormatConfiguration": { "NullString": "Determines the null string of null values." }, @@ -30766,6 +32016,7 @@ }, "AWS::QuickSight::Analysis ParameterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `ParameterDropDownControl` .", "DisplayOptions": "The display options of a control.", "ParameterControlId": "The ID of the `ParameterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -31000,6 +32251,9 @@ "AWS::QuickSight::Analysis ProgressBarOptions": { "Visibility": "The visibility of the progress bar." }, + "AWS::QuickSight::Analysis QueryExecutionOptions": { + "QueryExecutionMode": "A structure that describes the query execution mode." + }, "AWS::QuickSight::Analysis RadarChartAggregatedFieldWells": { "Category": "The aggregated field well categories of a radar chart.", "Color": "The color that are assigned to the aggregated field wells of a radar chart.", @@ -31282,6 +32536,9 @@ "AWS::QuickSight::Analysis SimpleClusterMarker": { "Color": "The color of the simple cluster marker." }, + "AWS::QuickSight::Analysis SingleAxisOptions": { + "YAxisOptions": "The Y axis options of a single axis configuration." 
+ }, "AWS::QuickSight::Analysis SliderControlDisplayOptions": { "InfoIconLabelOptions": "The configuration of info icon label options.", "TitleOptions": "The options to configure the title visibility, name, and font size." @@ -31770,6 +33027,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have visuals with the same identifiers.." }, + "AWS::QuickSight::Analysis YAxisOptions": { + "YAxis": "The Y axis type to be used in the chart.\n\nIf you choose `PRIMARY_Y_AXIS` , the primary Y Axis is located on the leftmost vertical axis of the chart." + }, "AWS::QuickSight::Dashboard": { "AwsAccountId": "The ID of the AWS account where you want to create the dashboard.", "DashboardId": "The ID for the dashboard, also added to the IAM policy.", @@ -32016,6 +33276,11 @@ "CustomFilterListConfiguration": "A list of custom filter values. In the Amazon QuickSight console, this filter type is called a custom filter list.", "FilterListConfiguration": "A list of filter configurations. In the Amazon QuickSight console, this filter type is called a filter list." }, + "AWS::QuickSight::Dashboard CategoryInnerFilter": { + "Column": "", + "Configuration": "", + "DefaultFilterControlConfiguration": "" + }, "AWS::QuickSight::Dashboard ChartAxisLabelOptions": { "AxisLabelOptions": "The label options for a chart axis.", "SortIconVisibility": "The visibility configuration of the sort icon on a chart's axis label.", @@ -32059,6 +33324,7 @@ "Aggregation": "The aggregation function of the column tooltip item.", "Column": "The target column of the tooltip item.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the column tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." }, "AWS::QuickSight::Dashboard ComboChartAggregatedFieldWells": { @@ -32081,6 +33347,7 @@ "ReferenceLines": "The reference line setup of the visual.", "SecondaryYAxisDisplayOptions": "The label display options (grid line, range, scale, axis step) of a combo chart's secondary y-axis (line) field well.", "SecondaryYAxisLabelOptions": "The label options (label text, label visibility, and sort icon visibility) of a combo chart's secondary y-axis(line) field well.", + "SingleAxisOptions": "", "SortConfiguration": "The sort configuration of a `ComboChartVisual` .", "Tooltip": "The legend display setup of the visual.", "VisualPalette": "The palette (chart color) display setup of the visual." @@ -32423,6 +33690,7 @@ "ValueWhenUnsetOption": "The built-in options for default values. The value can be one of the following:\n\n- `RECOMMENDED` : The recommended value.\n- `NULL` : The `NULL` value." }, "AWS::QuickSight::Dashboard DefaultDateTimePickerControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "Type": "The date time picker type of the `DefaultDateTimePickerControlOptions` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." }, @@ -32440,6 +33708,7 @@ "DefaultTextFieldOptions": "The default options that correspond to the `TextField` filter control type." 
}, "AWS::QuickSight::Dashboard DefaultFilterDropDownControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of a control.", "SelectableValues": "A list of selectable values that are used in a control.", "Type": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu." @@ -32468,6 +33737,7 @@ "SectionBased": "The options that determine the default settings for a section-based layout configuration." }, "AWS::QuickSight::Dashboard DefaultRelativeDateTimeControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `RelativeDateTimeControl` .", "DisplayOptions": "The display options of a control." }, "AWS::QuickSight::Dashboard DefaultSectionBasedLayoutConfiguration": { @@ -32573,6 +33843,7 @@ "AWS::QuickSight::Dashboard FieldTooltipItem": { "FieldId": "The unique ID of the field that is targeted by the tooltip.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the field tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." }, "AWS::QuickSight::Dashboard FilledMapAggregatedFieldWells": { @@ -32614,6 +33885,7 @@ }, "AWS::QuickSight::Dashboard Filter": { "CategoryFilter": "A `CategoryFilter` filters text values.\n\nFor more information, see [Adding text filters](https://docs.aws.amazon.com/quicksight/latest/user/add-a-text-filter-data-prep.html) in the *Amazon QuickSight User Guide* .", + "NestedFilter": "A `NestedFilter` filters data with a subset of data that is defined by the nested inner filter.", "NumericEqualityFilter": "A `NumericEqualityFilter` filters numeric values that equal or do not equal a given numeric value.", "NumericRangeFilter": "A `NumericRangeFilter` filters numeric values that are either inside or outside a given numeric range.", "RelativeDatesFilter": "A `RelativeDatesFilter` filters date values that are relative to a given date.", @@ -32637,6 +33909,7 @@ "SourceFilterId": "The source filter ID of the `FilterCrossSheetControl` ." }, "AWS::QuickSight::Dashboard FilterDateTimePickerControl": { + "CommitMode": "The visibility configurationof the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", @@ -32645,6 +33918,7 @@ }, "AWS::QuickSight::Dashboard FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of the `FilterDropDownControl` .", "FilterControlId": "The ID of the `FilterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -32683,6 +33957,7 @@ "SameSheetTargetVisualConfiguration": "The configuration of the same-sheet target visuals that you want to be filtered." 
}, "AWS::QuickSight::Dashboard FilterRelativeDateTimeControl": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterRelativeDateTimeControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterTextAreaControl` .", "SourceFilterId": "The source filter ID of the `FilterTextAreaControl` .", @@ -33025,6 +34300,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have visuals with the same identifiers." }, + "AWS::QuickSight::Dashboard InnerFilter": { + "CategoryInnerFilter": "A `CategoryInnerFilter` filters text values for the `NestedFilter` ." + }, "AWS::QuickSight::Dashboard InsightConfiguration": { "Computations": "The computations configurations of the insight visual", "CustomNarrative": "The custom narrative of the insight visual." @@ -33168,6 +34446,7 @@ "SecondaryYAxisDisplayOptions": "The series axis configuration of a line chart.", "SecondaryYAxisLabelOptions": "The options that determine the presentation of the secondary y-axis label.", "Series": "The series item configuration of a line chart.", + "SingleAxisOptions": "", "SmallMultiplesOptions": "The small multiples setup for the visual.", "SortConfiguration": "The sort configuration of a line chart.", "Tooltip": "The tooltip configuration of a line chart.", @@ -33280,6 +34559,12 @@ "AWS::QuickSight::Dashboard NegativeValueConfiguration": { "DisplayMode": "Determines the display mode of the negative value configuration." }, + "AWS::QuickSight::Dashboard NestedFilter": { + "Column": "The column that the filter is applied to.", + "FilterId": "An identifier that uniquely identifies a filter within a dashboard, analysis, or template.", + "IncludeInnerSet": "A boolean condition to include or exclude the subset that is defined by the values of the nested inner filter.", + "InnerFilter": "The `InnerFilter` defines the subset of data to be used with the `NestedFilter` ." + }, "AWS::QuickSight::Dashboard NullValueFormatConfiguration": { "NullString": "Determines the null string of null values." }, @@ -33397,6 +34682,7 @@ }, "AWS::QuickSight::Dashboard ParameterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `ParameterDropDownControl` .", "DisplayOptions": "The display options of a control.", "ParameterControlId": "The ID of the `ParameterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -33919,6 +35205,9 @@ "AWS::QuickSight::Dashboard SimpleClusterMarker": { "Color": "The color of the simple cluster marker." }, + "AWS::QuickSight::Dashboard SingleAxisOptions": { + "YAxisOptions": "The Y axis options of a single axis configuration." + }, "AWS::QuickSight::Dashboard SliderControlDisplayOptions": { "InfoIconLabelOptions": "The configuration of info icon label options.", "TitleOptions": "The options to configure the title visibility, name, and font size." @@ -34413,6 +35702,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. 
Two dashboards, analyses, or templates can have visuals with the same identifiers.." }, + "AWS::QuickSight::Dashboard YAxisOptions": { + "YAxis": "The Y axis type to be used in the chart.\n\nIf you choose `PRIMARY_Y_AXIS` , the primary Y Axis is located on the leftmost vertical axis of the chart." + }, "AWS::QuickSight::DataSet": { "AwsAccountId": "The AWS account ID.", "ColumnGroups": "Groupings of columns that work together in certain Amazon QuickSight features. Currently, only geospatial hierarchy is supported.", @@ -34422,6 +35714,7 @@ "DataSetUsageConfiguration": "The usage configuration to apply to child datasets that reference this dataset as a source.", "DatasetParameters": "The parameters that are declared in a dataset.", "FieldFolders": "The folder that contains fields and nested subfolders for your dataset.", + "FolderArns": "", "ImportMode": "Indicates whether you want to import the data into SPICE.", "IngestionWaitPolicy": "The wait policy to use when creating or updating a Dataset. The default is to wait for SPICE ingestion to finish with timeout of 36 hours.", "LogicalTableMap": "Configures the combination and transformation of the data from the physical tables.", @@ -34835,6 +36128,24 @@ "AWS::QuickSight::DataSource VpcConnectionProperties": { "VpcConnectionArn": "The Amazon Resource Name (ARN) for the VPC connection." }, + "AWS::QuickSight::Folder": { + "AwsAccountId": "", + "FolderId": "The ID of the folder.", + "FolderType": "The type of folder it is.", + "Name": "A display name for the folder.", + "ParentFolderArn": "A new parent folder arn. This change can only be applied if the import creates a brand new folder. Existing folders cannot be moved.", + "Permissions": "", + "SharingModel": "The sharing scope of the folder.", + "Tags": "A list of tags for the folders that you want to apply overrides to." + }, + "AWS::QuickSight::Folder ResourcePermission": { + "Actions": "", + "Principal": "" + }, + "AWS::QuickSight::Folder Tag": { + "Key": "", + "Value": "" + }, "AWS::QuickSight::RefreshSchedule": { "AwsAccountId": "The AWS account ID of the account that you are creating a schedule in.", "DataSetId": "The ID of the dataset that you are creating a refresh schedule for.", @@ -35094,6 +36405,11 @@ "CustomFilterListConfiguration": "A list of custom filter values. In the Amazon QuickSight console, this filter type is called a custom filter list.", "FilterListConfiguration": "A list of filter configurations. In the Amazon QuickSight console, this filter type is called a filter list." }, + "AWS::QuickSight::Template CategoryInnerFilter": { + "Column": "", + "Configuration": "", + "DefaultFilterControlConfiguration": "" + }, "AWS::QuickSight::Template ChartAxisLabelOptions": { "AxisLabelOptions": "The label options for a chart axis.", "SortIconVisibility": "The visibility configuration of the sort icon on a chart's axis label.", @@ -35149,6 +36465,7 @@ "Aggregation": "The aggregation function of the column tooltip item.", "Column": "The target column of the tooltip item.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the column tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." 
}, "AWS::QuickSight::Template ComboChartAggregatedFieldWells": { @@ -35171,6 +36488,7 @@ "ReferenceLines": "The reference line setup of the visual.", "SecondaryYAxisDisplayOptions": "The label display options (grid line, range, scale, axis step) of a combo chart's secondary y-axis (line) field well.", "SecondaryYAxisLabelOptions": "The label options (label text, label visibility, and sort icon visibility) of a combo chart's secondary y-axis(line) field well.", + "SingleAxisOptions": "", "SortConfiguration": "The sort configuration of a `ComboChartVisual` .", "Tooltip": "The legend display setup of the visual.", "VisualPalette": "The palette (chart color) display setup of the visual." @@ -35450,6 +36768,7 @@ "ValueWhenUnsetOption": "The built-in options for default values. The value can be one of the following:\n\n- `RECOMMENDED` : The recommended value.\n- `NULL` : The `NULL` value." }, "AWS::QuickSight::Template DefaultDateTimePickerControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "Type": "The date time picker type of the `DefaultDateTimePickerControlOptions` . Choose one of the following options:\n\n- `SINGLE_VALUED` : The filter condition is a fixed date.\n- `DATE_RANGE` : The filter condition is a date time range." }, @@ -35467,6 +36786,7 @@ "DefaultTextFieldOptions": "The default options that correspond to the `TextField` filter control type." }, "AWS::QuickSight::Template DefaultFilterDropDownControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of a control.", "SelectableValues": "A list of selectable values that are used in a control.", "Type": "The type of the `FilterDropDownControl` . Choose one of the following options:\n\n- `MULTI_SELECT` : The user can select multiple entries from a dropdown menu.\n- `SINGLE_SELECT` : The user can select a single entry from a dropdown menu." @@ -35495,6 +36815,7 @@ "SectionBased": "The options that determine the default settings for a section-based layout configuration." }, "AWS::QuickSight::Template DefaultRelativeDateTimeControlOptions": { + "CommitMode": "The visibility configuration of the Apply button on a `RelativeDateTimeControl` .", "DisplayOptions": "The display options of a control." }, "AWS::QuickSight::Template DefaultSectionBasedLayoutConfiguration": { @@ -35591,6 +36912,7 @@ "AWS::QuickSight::Template FieldTooltipItem": { "FieldId": "The unique ID of the field that is targeted by the tooltip.", "Label": "The label of the tooltip item.", + "TooltipTarget": "Determines the target of the field tooltip item in a combo chart visual.", "Visibility": "The visibility of the tooltip item." 
}, "AWS::QuickSight::Template FilledMapAggregatedFieldWells": { @@ -35632,6 +36954,7 @@ }, "AWS::QuickSight::Template Filter": { "CategoryFilter": "A `CategoryFilter` filters text values.\n\nFor more information, see [Adding text filters](https://docs.aws.amazon.com/quicksight/latest/user/add-a-text-filter-data-prep.html) in the *Amazon QuickSight User Guide* .", + "NestedFilter": "A `NestedFilter` filters data with a subset of data that is defined by the nested inner filter.", "NumericEqualityFilter": "A `NumericEqualityFilter` filters numeric values that equal or do not equal a given numeric value.", "NumericRangeFilter": "A `NumericRangeFilter` filters numeric values that are either inside or outside a given numeric range.", "RelativeDatesFilter": "A `RelativeDatesFilter` filters date values that are relative to a given date.", @@ -35655,6 +36978,7 @@ "SourceFilterId": "The source filter ID of the `FilterCrossSheetControl` ." }, "AWS::QuickSight::Template FilterDateTimePickerControl": { + "CommitMode": "The visibility configurationof the Apply button on a `DateTimePickerControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterDateTimePickerControl` .", "SourceFilterId": "The source filter ID of the `FilterDateTimePickerControl` .", @@ -35663,6 +36987,7 @@ }, "AWS::QuickSight::Template FilterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `FilterDropDownControl` .", "DisplayOptions": "The display options of the `FilterDropDownControl` .", "FilterControlId": "The ID of the `FilterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -35701,6 +37026,7 @@ "SameSheetTargetVisualConfiguration": "The configuration of the same-sheet target visuals that you want to be filtered." }, "AWS::QuickSight::Template FilterRelativeDateTimeControl": { + "CommitMode": "The visibility configuration of the Apply button on a `FilterRelativeDateTimeControl` .", "DisplayOptions": "The display options of a control.", "FilterControlId": "The ID of the `FilterTextAreaControl` .", "SourceFilterId": "The source filter ID of the `FilterTextAreaControl` .", @@ -36043,6 +37369,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have visuals with the same identifiers." }, + "AWS::QuickSight::Template InnerFilter": { + "CategoryInnerFilter": "A `CategoryInnerFilter` filters text values for the `NestedFilter` ." + }, "AWS::QuickSight::Template InsightConfiguration": { "Computations": "The computations configurations of the insight visual", "CustomNarrative": "The custom narrative of the insight visual." 
@@ -36182,6 +37511,7 @@ "SecondaryYAxisDisplayOptions": "The series axis configuration of a line chart.", "SecondaryYAxisLabelOptions": "The options that determine the presentation of the secondary y-axis label.", "Series": "The series item configuration of a line chart.", + "SingleAxisOptions": "", "SmallMultiplesOptions": "The small multiples setup for the visual.", "SortConfiguration": "The sort configuration of a line chart.", "Tooltip": "The tooltip configuration of a line chart.", @@ -36291,6 +37621,12 @@ "AWS::QuickSight::Template NegativeValueConfiguration": { "DisplayMode": "Determines the display mode of the negative value configuration." }, + "AWS::QuickSight::Template NestedFilter": { + "Column": "The column that the filter is applied to.", + "FilterId": "An identifier that uniquely identifies a filter within a dashboard, analysis, or template.", + "IncludeInnerSet": "A boolean condition to include or exclude the subset that is defined by the values of the nested inner filter.", + "InnerFilter": "The `InnerFilter` defines the subset of data to be used with the `NestedFilter` ." + }, "AWS::QuickSight::Template NullValueFormatConfiguration": { "NullString": "Determines the null string of null values." }, @@ -36408,6 +37744,7 @@ }, "AWS::QuickSight::Template ParameterDropDownControl": { "CascadingControlConfiguration": "The values that are displayed in a control can be configured to only show values that are valid based on what's selected in other controls.", + "CommitMode": "The visibility configuration of the Apply button on a `ParameterDropDownControl` .", "DisplayOptions": "The display options of a control.", "ParameterControlId": "The ID of the `ParameterDropDownControl` .", "SelectableValues": "A list of selectable values that are used in a control.", @@ -36636,6 +37973,9 @@ "AWS::QuickSight::Template ProgressBarOptions": { "Visibility": "The visibility of the progress bar." }, + "AWS::QuickSight::Template QueryExecutionOptions": { + "QueryExecutionMode": "A structure that describes the query execution mode." + }, "AWS::QuickSight::Template RadarChartAggregatedFieldWells": { "Category": "The aggregated field well categories of a radar chart.", "Color": "The color that are assigned to the aggregated field wells of a radar chart.", @@ -36918,6 +38258,9 @@ "AWS::QuickSight::Template SimpleClusterMarker": { "Color": "The color of the simple cluster marker." }, + "AWS::QuickSight::Template SingleAxisOptions": { + "YAxisOptions": "The Y axis options of a single axis configuration." + }, "AWS::QuickSight::Template SliderControlDisplayOptions": { "InfoIconLabelOptions": "The configuration of info icon label options.", "TitleOptions": "The options to configure the title visibility, name, and font size." @@ -37133,6 +38476,7 @@ "FilterGroups": "Filter definitions for a template.\n\nFor more information, see [Filtering Data](https://docs.aws.amazon.com/quicksight/latest/user/filtering-visual-data.html) in the *Amazon QuickSight User Guide* .", "Options": "An array of option definitions for a template.", "ParameterDeclarations": "An array of parameter declarations for a template.\n\n*Parameters* are named variables that can transfer a value for use by an action or an object.\n\nFor more information, see [Parameters in Amazon QuickSight](https://docs.aws.amazon.com/quicksight/latest/user/parameters-in-quicksight.html) in the *Amazon QuickSight User Guide* .", + "QueryExecutionOptions": "", "Sheets": "An array of sheet definitions for a template." 
}, "AWS::QuickSight::Template TextAreaControlDisplayOptions": { @@ -37439,6 +38783,9 @@ "Title": "The title that is displayed on the visual.", "VisualId": "The unique identifier of a visual. This identifier must be unique within the context of a dashboard, template, or analysis. Two dashboards, analyses, or templates can have visuals with the same identifiers.." }, + "AWS::QuickSight::Template YAxisOptions": { + "YAxis": "The Y axis type to be used in the chart.\n\nIf you choose `PRIMARY_Y_AXIS` , the primary Y Axis is located on the leftmost vertical axis of the chart." + }, "AWS::QuickSight::Theme": { "AwsAccountId": "The ID of the AWS account where you want to store the new theme.", "BaseThemeId": "The ID of the theme that a custom theme will inherit from. All themes inherit from one of the starting themes defined by Amazon QuickSight. For a list of the starting themes, use `ListThemes` or choose *Themes* from within an analysis.", @@ -37774,7 +39121,7 @@ "AssociatedRoles": "Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other Amazon Web Services on your behalf.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "AutoMinorVersionUpgrade": "Specifies whether minor engine upgrades are applied automatically to the DB cluster during the maintenance window. By default, minor engine upgrades are applied automatically.\n\nValid for Cluster Type: Multi-AZ DB clusters only", "AvailabilityZones": "A list of Availability Zones (AZs) where instances in the DB cluster can be created. For information on AWS Regions and Availability Zones, see [Choosing the Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.RegionsAndAvailabilityZones.html) in the *Amazon Aurora User Guide* .\n\nValid for: Aurora DB clusters only", - "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "BacktrackWindow": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "BackupRetentionPeriod": "The number of days for which automated backups are retained.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 1 to 35\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "CopyTagsToSnapshot": "A value that indicates whether to copy all tags from the DB cluster to snapshots of the DB cluster. The default is not to copy them.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "DBClusterIdentifier": "The DB cluster identifier. 
This parameter is stored as a lowercase string.\n\nConstraints:\n\n- Must contain from 1 to 63 letters, numbers, or hyphens.\n- First character must be a letter.\n- Can't end with a hyphen or contain two consecutive hyphens.\n\nExample: `my-cluster1`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37812,7 +39159,7 @@ "Port": "The port number on which the DB instances in the DB cluster accept connections.\n\nDefault:\n\n- When `EngineMode` is `provisioned` , `3306` (for both Aurora MySQL and Aurora PostgreSQL)\n- When `EngineMode` is `serverless` :\n\n- `3306` when `Engine` is `aurora` or `aurora-mysql`\n- `5432` when `Engine` is `aurora-postgresql`\n\n> The `No interruption` on update behavior only applies to DB clusters. If you are updating a DB instance, see [Port](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-port) for the AWS::RDS::DBInstance resource. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredBackupWindow": "The daily time range during which automated backups are created. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Managing.Backups.html#Aurora.Managing.Backups.BackupWindow) in the *Amazon Aurora User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Cluster Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow.Aurora) in the *Amazon Aurora User Guide.*\n\nValid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.\n\nConstraints: Minimum 30-minute window.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", - "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "PubliclyAccessible": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "ReadEndpoint": "This data type represents the information you need to connect to an Amazon RDS DB instance. 
This data type is used as a response element in the following actions:\n\n- `CreateDBInstance`\n- `DescribeDBInstances`\n- `DeleteDBInstance`\n\nFor the data structure that represents Amazon Aurora DB cluster endpoints, see `DBClusterEndpoint` .", "ReplicationSourceIdentifier": "The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica.\n\nValid for: Aurora DB clusters only", "RestoreToTime": "The date and time to restore the DB cluster to.\n\nValid Values: Value must be a time in Universal Coordinated Time (UTC) format\n\nConstraints:\n\n- Must be before the latest restorable time for the DB instance\n- Must be specified if `UseLatestRestorableTime` parameter isn't provided\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled\n- Can't be specified if the `RestoreType` parameter is `copy-on-write`\n\nThis property must be used with `SourceDBClusterIdentifier` property. The resulting cluster will have the identifier that matches the value of the `DBclusterIdentifier` property.\n\nExample: `2015-03-07T23:45:00Z`\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", @@ -37824,7 +39171,7 @@ "SourceRegion": "The AWS Region which contains the source DB cluster when replicating a DB cluster. For example, `us-east-1` .\n\nValid for: Aurora DB clusters only", "StorageEncrypted": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBClusterIdentifier` property, don't specify this property. The value is inherited from the source DB cluster, and if the DB cluster is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot is encrypted, don't specify this property. The value is inherited from the snapshot, and the specified `KmsKeyId` property is used.\n\nIf you specify the `SnapshotIdentifier` and the specified snapshot isn't encrypted, you can use this property to specify that the restored DB cluster is encrypted. Specify the `KmsKeyId` property for the KMS key to use for encryption. If you don't want the restored DB cluster to be encrypted, then don't set this property or set it to `false` .\n\n> If you specify both the `StorageEncrypted` and `SnapshotIdentifier` properties without specifying the `KmsKeyId` property, then the restored DB cluster inherits the encryption settings from the DB snapshot that provide. \n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "StorageType": "The storage type to associate with the DB cluster.\n\nFor information on storage types for Aurora DB clusters, see [Storage configurations for Amazon Aurora DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.StorageReliability.html#aurora-storage-type) . 
For information on storage types for Multi-AZ DB clusters, see [Settings for creating Multi-AZ DB clusters](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/create-multi-az-db-cluster.html#create-multi-az-db-cluster-settings) .\n\nThis setting is required to create a Multi-AZ DB cluster.\n\nWhen specified for a Multi-AZ DB cluster, a value for the `Iops` parameter is required.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters\n\nValid Values:\n\n- Aurora DB clusters - `aurora | aurora-iopt1`\n- Multi-AZ DB clusters - `io1 | io2 | gp3`\n\nDefault:\n\n- Aurora DB clusters - `aurora`\n- Multi-AZ DB clusters - `io1`\n\n> When you create an Aurora DB cluster with the storage type set to `aurora-iopt1` , the storage type is returned in the response. The storage type isn't returned when you set it to `aurora` .", - "Tags": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "Tags": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "UseLatestRestorableTime": "A value that indicates whether to restore the DB cluster to the latest restorable backup time. By default, the DB cluster is not restored to the latest restorable backup time.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", "VpcSecurityGroupIds": "A list of EC2 VPC security groups to associate with this DB cluster.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters" }, @@ -37838,7 +39185,7 @@ }, "AWS::RDS::DBCluster MasterUserSecret": { "KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.", - "SecretArn": "The Amazon Resource Name (ARN) of the secret." + "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) ." }, "AWS::RDS::DBCluster ReadEndpoint": { "Address": "The host address of the reader endpoint." @@ -37860,11 +39207,11 @@ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")." }, "AWS::RDS::DBClusterParameterGroup": { - "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", - "Description": "A friendly description for this DB cluster parameter group.", - "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. 
\n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "DBClusterParameterGroupName": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "Description": "The description for the DB cluster parameter group.", + "Family": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "Parameters": "Provides a list of parameters for the DB cluster parameter group.", - "Tags": "An optional array of key-value pairs to apply to this DB cluster parameter group." + "Tags": "Tags to assign to the DB cluster parameter group." }, "AWS::RDS::DBClusterParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37876,7 +39223,7 @@ "AssociatedRoles": "The AWS Identity and Access Management (IAM) roles associated with the DB instance.\n\n*Amazon Aurora*\n\nNot applicable. The associated roles are managed by the DB cluster.", "AutoMinorVersionUpgrade": "A value that indicates whether minor engine upgrades are applied automatically to the DB instance during the maintenance window. By default, minor engine upgrades are applied automatically.", "AutomaticBackupReplicationKmsKeyId": "The AWS KMS key identifier for encryption of the replicated automated backups. The KMS key ID is the Amazon Resource Name (ARN) for the KMS encryption key in the destination AWS Region , for example, `arn:aws:kms:us-east-1:123456789012:key/AKIAIOSFODNN7EXAMPLE` .", - "AutomaticBackupReplicationRegion": "", + "AutomaticBackupReplicationRegion": "The AWS Region associated with the automated backup.", "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. 
For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\nFor Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. Aurora automatically chooses an appropriate Availability Zone if you don't specify one.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nConstraints:\n\n- The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment.\n- The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\nExample: `us-east-1d`", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", @@ -37885,19 +39232,19 @@ "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", "CopyTagsToSnapshot": "Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. 
Setting this value for an Aurora DB instance has no effect on the DB cluster setting.", "CustomIAMInstanceProfile": "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance.\n\nThis setting is required for RDS Custom.\n\nConstraints:\n\n- The profile must exist in your account.\n- The profile must have an IAM role that Amazon EC2 has permissions to assume.\n- The instance profile name and the associated IAM role name must start with the prefix `AWSRDSCustom` .\n\nFor the list of permissions required for the IAM role, see [Configure IAM and your VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the *Amazon RDS User Guide* .", - "DBClusterIdentifier": "The identifier of the DB cluster that the instance will belong to.", + "DBClusterIdentifier": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "DBClusterSnapshotIdentifier": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", "DBInstanceClass": "The compute and memory capacity of the DB instance, for example `db.m5.large` . Not all DB instance classes are available in all AWS Regions , or for all database engines. For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "DBInstanceIdentifier": "A name for the DB instance. If you specify a name, AWS CloudFormation converts it to lowercase. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the DB instance. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "DBName": "The meaning of this parameter differs according to the database engine you use.\n\n> If you specify the `[DBSnapshotIdentifier](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsnapshotidentifier)` property, this property only applies to RDS for Oracle. \n\n*Amazon Aurora*\n\nNot applicable. 
The database name is managed by the DB cluster.\n\n*Db2*\n\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Can't be a word reserved by the specified database engine.\n\n*MySQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*MariaDB*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*PostgreSQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, the default `postgres` database is created in the DB instance.\n\nConstraints:\n\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Must contain 1 to 63 characters.\n- Can't be a word reserved by the specified database engine\n\n*Oracle*\n\nThe Oracle System ID (SID) of the created DB instance. If you specify `null` , the default value `ORCL` is used. You can't specify the string NULL, or any other reserved word, for `DBName` .\n\nDefault: `ORCL`\n\nConstraints:\n\n- Can't be longer than 8 characters\n\n*SQL Server*\n\nNot applicable. Must be null.", "DBParameterGroupName": "The name of an existing DB parameter group or a reference to an [AWS::RDS::DBParameterGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbparametergroup.html) resource created in the template.\n\nTo list all of the available DB parameter group names, use the following command:\n\n`aws rds describe-db-parameter-groups --query \"DBParameterGroups[].DBParameterGroupName\" --output text`\n\n> If any of the data members of the referenced parameter group are changed during an update, the DB instance might need to be restarted, which causes some interruption. If the parameter group contains static parameters, whether they were changed or not, an update triggers a reboot. \n\nIf you don't specify a value for `DBParameterGroupName` property, the default DB parameter group for the specified engine and engine version is used.", "DBSecurityGroups": "A list of the DB security groups to assign to the DB instance. The list can include both the name of existing DB security groups or references to AWS::RDS::DBSecurityGroup resources created in the template.\n\nIf you set DBSecurityGroups, you must not set VPCSecurityGroups, and vice versa. Also, note that the DBSecurityGroups property exists only for backwards compatibility with older regions and is no longer recommended for providing security information to an RDS DB instance. 
Instead, use VPCSecurityGroups.\n\n> If you specify this property, AWS CloudFormation sends only the following properties (if specified) to Amazon RDS during create operations:\n> \n> - `AllocatedStorage`\n> - `AutoMinorVersionUpgrade`\n> - `AvailabilityZone`\n> - `BackupRetentionPeriod`\n> - `CharacterSetName`\n> - `DBInstanceClass`\n> - `DBName`\n> - `DBParameterGroupName`\n> - `DBSecurityGroups`\n> - `DBSubnetGroupName`\n> - `Engine`\n> - `EngineVersion`\n> - `Iops`\n> - `LicenseModel`\n> - `MasterUsername`\n> - `MasterUserPassword`\n> - `MultiAZ`\n> - `OptionGroupName`\n> - `PreferredBackupWindow`\n> - `PreferredMaintenanceWindow`\n> \n> All other properties are ignored. Specify a virtual private cloud (VPC) security group if you want to submit other properties, such as `StorageType` , `StorageEncrypted` , or `KmsKeyId` . If you're already using the `DBSecurityGroups` property, you can't use these other properties by updating your DB instance to use a VPC security group. You must recreate the DB instance.", - "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", - "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. 
If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "DBSnapshotIdentifier": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an unencrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", + "DBSubnetGroupName": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", "DBSystemId": "The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. 
In this context, the term \"Oracle database instance\" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to `RDSCDB` . The Oracle SID is also the name of your CDB.", "DedicatedLogVolume": "Indicates whether the DB instance has a dedicated log volume (DLV) enabled.", "DeleteAutomatedBackups": "A value that indicates whether to remove automated backups immediately after the DB instance is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB instance is deleted.\n\n*Amazon Aurora*\n\nNot applicable. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the DB cluster are not deleted.", - "DeletionProtection": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "DeletionProtection": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "Domain": "The Active Directory directory ID to create the DB instance in. Currently, only Db2, MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.\n\nFor more information, see [Kerberos Authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) in the *Amazon RDS User Guide* .", "DomainAuthSecretArn": "The ARN for the Secrets Manager secret with the credentials for the user joining the domain.\n\nExample: `arn:aws:secretsmanager:region:account-number:secret:myselfmanagedADtestsecret-123456`", "DomainDnsIps": "The IPv4 DNS IP addresses of your primary and secondary Active Directory domain controllers.\n\nConstraints:\n\n- Two IP addresses must be provided. If there isn't a secondary domain controller, use the IP address of the primary domain controller for both entries in the list.\n\nExample: `123.124.125.126,234.235.236.237`", @@ -37919,22 +39266,22 @@ "MasterUserSecret": "The secret managed by RDS in AWS Secrets Manager for the master user password.\n\nFor more information, see [Password management with AWS Secrets Manager](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-secrets-manager.html) in the *Amazon RDS User Guide.*", "MasterUsername": "The master user name for the DB instance.\n\n> If you specify the `SourceDBInstanceIdentifier` or `DBSnapshotIdentifier` property, don't specify this property. 
The value is inherited from the source DB instance or snapshot.\n> \n> When migrating a self-managed Db2 database, we recommend that you use the same master username as your self-managed Db2 instance name. \n\n*Amazon Aurora*\n\nNot applicable. The name for the master user is managed by the DB cluster.\n\n*RDS for Db2*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MariaDB*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Microsoft SQL Server*\n\nConstraints:\n\n- Must be 1 to 128 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for MySQL*\n\nConstraints:\n\n- Must be 1 to 16 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for Oracle*\n\nConstraints:\n\n- Must be 1 to 30 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.\n\n*RDS for PostgreSQL*\n\nConstraints:\n\n- Must be 1 to 63 letters or numbers.\n- First character must be a letter.\n- Can't be a reserved word for the chosen database engine.", "MaxAllocatedStorage": "The upper limit in gibibytes (GiB) to which Amazon RDS can automatically scale the storage of the DB instance.\n\nFor more information about this setting, including limitations that apply to it, see [Managing capacity automatically with Amazon RDS storage autoscaling](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIOPS.StorageTypes.html#USER_PIOPS.Autoscaling) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (Storage is managed by the DB cluster.)\n- RDS Custom", - "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "MonitoringInterval": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "MonitoringRoleArn": "The ARN for the IAM role that permits RDS to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, `arn:aws:iam:123456789012:role/emaccess` . For information on creating a monitoring role, see [Setting Up and Enabling Enhanced Monitoring](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.OS.html#USER_Monitoring.OS.Enabling) in the *Amazon RDS User Guide* .\n\nIf `MonitoringInterval` is set to a value other than `0` , then you must supply a `MonitoringRoleArn` value.\n\nThis setting doesn't apply to RDS Custom DB instances.", - "MultiAZ": "Specifies whether the database instance is a Multi-AZ DB instance deployment. 
You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "MultiAZ": "Specifies whether the DB instance is a Multi-AZ deployment. You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "NcharCharacterSetName": "The name of the NCHAR character set for the Oracle DB instance.\n\nThis setting doesn't apply to RDS Custom DB instances.", "NetworkType": "The network type of the DB instance.\n\nValid values:\n\n- `IPV4`\n- `DUAL`\n\nThe network type is determined by the `DBSubnetGroup` specified for the DB instance. A `DBSubnetGroup` can support only the IPv4 protocol or the IPv4 and IPv6 protocols ( `DUAL` ).\n\nFor more information, see [Working with a DB instance in a VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html) in the *Amazon RDS User Guide.*", "OptionGroupName": "Indicates that the DB instance should be associated with the specified option group.\n\nPermanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group. Also, that option group can't be removed from a DB instance once it is associated with a DB instance.", "PerformanceInsightsKMSKeyId": "The AWS KMS key identifier for encryption of Performance Insights data.\n\nThe KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key.\n\nIf you do not specify a value for `PerformanceInsightsKMSKeyId` , then Amazon RDS uses your default KMS key. There is a default KMS key for your AWS account. Your AWS account has a different default KMS key for each AWS Region.\n\nFor information about enabling Performance Insights, see [EnablePerformanceInsights](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-enableperformanceinsights) .", "PerformanceInsightsRetentionPeriod": "The number of days to retain Performance Insights data.\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values:\n\n- `7`\n- *month* * 31, where *month* is a number of months from 1-23. Examples: `93` (3 months * 31), `341` (11 months * 31), `589` (19 months * 31)\n- `731`\n\nDefault: `7` days\n\nIf you specify a retention period that isn't valid, such as `94` , Amazon RDS returns an error.", - "Port": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "Port": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. 
The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "PreferredBackupWindow": "The daily time range during which automated backups are created if automated backups are enabled, using the `BackupRetentionPeriod` parameter. For more information, see [Backup Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) in the *Amazon RDS User Guide.*\n\nConstraints:\n\n- Must be in the format `hh24:mi-hh24:mi` .\n- Must be in Universal Coordinated Time (UTC).\n- Must not conflict with the preferred maintenance window.\n- Must be at least 30 minutes.\n\n*Amazon Aurora*\n\nNot applicable. The daily time range for creating automated backups is managed by the DB cluster.", "PreferredMaintenanceWindow": "The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).\n\nFormat: `ddd:hh24:mi-ddd:hh24:mi`\n\nThe default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. To see the time blocks available, see [Adjusting the Preferred DB Instance Maintenance Window](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) in the *Amazon RDS User Guide.*\n\n> This property applies when AWS CloudFormation initially creates the DB instance. If you use AWS CloudFormation to update the DB instance, those updates are applied immediately. \n\nConstraints: Minimum 30-minute window.", "ProcessorFeatures": "The number of CPU cores and the number of threads per core for the DB instance class of the DB instance.\n\nThis setting doesn't apply to Amazon Aurora or RDS Custom DB instances.", "PromotionTier": "The order of priority in which an Aurora Replica is promoted to the primary instance after a failure of the existing primary instance. For more information, see [Fault Tolerance for an Aurora DB Cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.AuroraHighAvailability.html#Aurora.Managing.FaultTolerance) in the *Amazon Aurora User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nDefault: `1`\n\nValid Values: `0 - 15`", "PubliclyAccessible": "Indicates whether the DB instance is an internet-facing instance. If you specify true, AWS CloudFormation creates an instance with a publicly resolvable DNS name, which resolves to a public IP address. If you specify false, AWS CloudFormation creates an internal instance with a DNS name that resolves to a private IP address.\n\nThe default behavior value depends on your VPC setup and the database subnet group. For more information, see the `PubliclyAccessible` parameter in the [CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html) in the *Amazon RDS API Reference* .", "ReplicaMode": "The open mode of an Oracle read replica. 
For more information, see [Working with Oracle Read Replicas for Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) in the *Amazon RDS User Guide* .\n\nThis setting is only supported in RDS for Oracle.\n\nDefault: `open-read-only`\n\nValid Values: `open-read-only` or `mounted`",
- "RestoreTime": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`",
+ "RestoreTime": "The date and time to restore from. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`",
"SourceDBClusterIdentifier": "The identifier of the Multi-AZ DB cluster that will act as the source for the read replica. Each DB cluster can have up to 15 read replicas.\n\nConstraints:\n\n- Must be the identifier of an existing Multi-AZ DB cluster.\n- Can't be specified if the `SourceDBInstanceIdentifier` parameter is also specified.\n- The specified DB cluster must have automatic backups enabled, that is, its backup retention period must be greater than 0.\n- The source DB cluster must be in the same AWS Region as the read replica. Cross-Region replication isn't supported.",
"SourceDBInstanceAutomatedBackupsArn": "The Amazon Resource Name (ARN) of the replicated automated backups from which to restore, for example, `arn:aws:rds:us-east-1:123456789012:auto-backup:ab-L2IJCEXJP7XQ7HOJ4SIEXAMPLE` .\n\nThis setting doesn't apply to RDS Custom.",
"SourceDBInstanceIdentifier": "If you want to create a read replica DB instance, specify the ID of the source DB instance. Each DB instance can have a limited number of read replicas. For more information, see [Working with Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/DeveloperGuide/USER_ReadRepl.html) in the *Amazon RDS User Guide* .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\nThe `SourceDBInstanceIdentifier` property determines whether a DB instance is a read replica. If you remove the `SourceDBInstanceIdentifier` property from your template and then update your stack, AWS CloudFormation promotes the read replica to a standalone DB instance.\n\nIf you specify the `UseLatestRestorableTime` or `RestoreTime` properties in conjunction with the `SourceDBInstanceIdentifier` property, RDS restores the DB instance to the requested point in time, thereby creating a new DB instance.\n\n> - If you specify a source DB instance that uses VPC security groups, we recommend that you specify the `VPCSecurityGroups` property. If you don't specify the property, the read replica inherits the value of the `VPCSecurityGroups` property from the source DB when you create the replica.
However, if you update the stack, AWS CloudFormation reverts the replica's `VPCSecurityGroups` property to the default value because it's not defined in the stack's template. This change might cause unexpected issues.\n> - Read replicas don't support deletion policies. AWS CloudFormation ignores any deletion policy that's associated with a read replica.\n> - If you specify `SourceDBInstanceIdentifier` , don't specify the `DBSnapshotIdentifier` property. You can't create a read replica from a snapshot.\n> - Don't set the `BackupRetentionPeriod` , `DBName` , `MasterUsername` , `MasterUserPassword` , and `PreferredBackupWindow` properties. The database attributes are inherited from the source DB instance, and backups are disabled for read replicas.\n> - If the source DB instance is in a different region than the read replica, specify the source region in `SourceRegion` , and specify an ARN for a valid DB instance in `SourceDBInstanceIdentifier` . For more information, see [Constructing a Amazon RDS Amazon Resource Name (ARN)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN) in the *Amazon RDS User Guide* .\n> - For DB instances in Amazon Aurora clusters, don't specify this property. Amazon RDS automatically assigns writer and reader DB instances.", @@ -37943,10 +39290,10 @@ "StorageEncrypted": "A value that indicates whether the DB instance is encrypted. By default, it isn't encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption.\n\nIf you specify the `SourceDBInstanceIdentifier` or `SourceDbiResourceId` property, don't specify this property. The value is inherited from the source DB instance, and if the DB instance is encrypted, the specified `KmsKeyId` property is used.\n\nIf you specify the `SourceDBInstanceAutomatedBackupsArn` property, don't specify this property. The value is inherited from the source DB instance automated backup.\n\nIf you specify `DBSnapshotIdentifier` property, don't specify this property. The value is inherited from the snapshot.\n\n*Amazon Aurora*\n\nNot applicable. The encryption for DB instances is managed by the DB cluster.", "StorageThroughput": "Specifies the storage throughput value for the DB instance. This setting applies only to the `gp3` storage type.\n\nThis setting doesn't apply to RDS Custom or Amazon Aurora.", "StorageType": "The storage type to associate with the DB instance.\n\nIf you specify `io1` , `io2` , or `gp3` , you must also include a value for the `Iops` parameter.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Storage is managed by the DB cluster.\n\nValid Values: `gp2 | gp3 | io1 | io2 | standard`\n\nDefault: `io1` , if the `Iops` parameter is specified. Otherwise, `gp2` .", - "Tags": "An optional array of key-value pairs to apply to this DB instance.", + "Tags": "Tags to assign to the DB instance.", "Timezone": "The time zone of the DB instance. The time zone parameter is currently supported only by [RDS for Db2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/db2-time-zone) and [RDS for SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) .", "UseDefaultProcessorFeatures": "Specifies whether the DB instance class of the DB instance uses its default processor features.\n\nThis setting doesn't apply to RDS Custom DB instances.", - "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. 
By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.",
+ "UseLatestRestorableTime": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.",
"VPCSecurityGroups": "A list of the VPC security group IDs to assign to the DB instance. The list can include both the physical IDs of existing VPC security groups and references to [AWS::EC2::SecurityGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group.html) resources created in the template.\n\nIf you plan to update the resource, don't specify VPC security groups in a shared VPC.\n\nIf you set `VPCSecurityGroups` , you must not set [`DBSecurityGroups`](https://docs.aws.amazon.com//AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsecuritygroups) , and vice versa.\n\n> You can migrate a DB instance in your stack from an RDS DB security group to a VPC security group, but keep the following in mind:\n> \n> - You can't revert to using an RDS security group after you establish a VPC security group membership.\n> - When you migrate your DB instance to VPC security groups, if your stack update rolls back because the DB instance update fails or because an update fails in another AWS CloudFormation resource, the rollback fails because it can't revert to an RDS security group.\n> - To use the properties that are available when you use a VPC security group, you must recreate the DB instance. If you don't, AWS CloudFormation submits only the property values that are listed in the [`DBSecurityGroups`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsecuritygroups) property. \n\nTo avoid this situation, migrate your DB instance to using VPC security groups only when that is the only change in your stack template.\n\n*Amazon Aurora*\n\nNot applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. If specified, the setting must match the DB cluster setting."
},
"AWS::RDS::DBInstance CertificateDetails": {
@@ -37964,7 +39311,7 @@
},
"AWS::RDS::DBInstance MasterUserSecret": {
"KmsKeyId": "The AWS KMS key identifier that is used to encrypt the secret.",
- "SecretArn": "The Amazon Resource Name (ARN) of the secret."
+ "SecretArn": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) ."
},
"AWS::RDS::DBInstance ProcessorFeature": {
"Name": "The name of the processor feature.
Valid names are `coreCount` and `threadsPerCore` .", @@ -37977,9 +39324,9 @@ "AWS::RDS::DBParameterGroup": { "DBParameterGroupName": "The name of the DB parameter group.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens.\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nIf you don't specify a value for `DBParameterGroupName` property, a name is automatically created for the DB parameter group.\n\n> This value is stored as a lowercase string.", "Description": "Provides the customer-specified description for this DB parameter group.", - "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", - "Parameters": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", - "Tags": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection." + "Family": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", + "Parameters": "An array of parameter names and values for the parameter update. You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "Tags": "Tags to assign to the DB parameter group." }, "AWS::RDS::DBParameterGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -37989,7 +39336,7 @@ "Auth": "The authorization mechanism that the proxy uses.", "DBProxyName": "The identifier for the proxy. This name must be unique for all proxies owned by your AWS account in the specified AWS Region . An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.", "DebugLogging": "Specifies whether the proxy includes detailed information about SQL statements in its logs. This information helps you to debug issues involving SQL behavior or the performance and scalability of the proxy connections. The debug information includes the text of SQL statements that you submit through the proxy. Thus, only enable this setting when needed for debugging, and only when you have security measures in place to safeguard any sensitive information that appears in the logs.", - "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "EngineFamily": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "IdleClientTimeout": "The number of seconds that a connection to the proxy can be inactive before the proxy disconnects it. You can set this value higher or lower than the connection timeout limit for the associated database.", "RequireTLS": "Specifies whether Transport Layer Security (TLS) encryption is required for connections to the proxy. 
By enabling this setting, you can enforce encrypted TLS connections to the proxy.",
"RoleArn": "The Amazon Resource Name (ARN) of the IAM role that the proxy uses to access secrets in AWS Secrets Manager.",
@@ -37998,37 +39345,37 @@
"VpcSubnetIds": "One or more VPC subnet IDs to associate with the new proxy."
},
"AWS::RDS::DBProxy AuthFormat": {
- "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`",
+ "AuthScheme": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.",
"ClientPasswordAuthType": "Specifies the details of authentication used by a proxy to log in as a specific database user.",
"Description": "A user-specified description about the authentication used by a proxy to log in as a specific database user.",
- "IAMAuth": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`",
+ "IAMAuth": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.",
"SecretArn": "The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager."
},
"AWS::RDS::DBProxy TagFormat": {
- "Key": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
- "Value": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\")."
+ "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
+ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
},
"AWS::RDS::DBProxyEndpoint": {
"DBProxyEndpointName": "The name of the DB proxy endpoint to create.",
"DBProxyName": "The name of the DB proxy associated with the DB proxy endpoint that you create.",
"Tags": "An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy.",
- "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`",
+ "TargetRole": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.",
"VpcSecurityGroupIds": "The VPC security group IDs for the DB proxy endpoint that you create. You can specify a different set of security group IDs than for the original DB proxy. The default is the default security group for the VPC.",
"VpcSubnetIds": "The VPC subnet IDs for the DB proxy endpoint that you create. You can specify a different set of subnet IDs than for the original DB proxy."
},
"AWS::RDS::DBProxyEndpoint TagFormat": {
- "Key": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
- "Value": "Metadata assigned to a DB instance consisting of a key-value pair."
+ "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
+ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
},
"AWS::RDS::DBProxyTargetGroup": {
- "ConnectionPoolConfigurationInfo": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .",
+ "ConnectionPoolConfigurationInfo": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .",
"DBClusterIdentifiers": "One or more DB cluster identifiers.",
"DBInstanceIdentifiers": "One or more DB instance identifiers.",
"DBProxyName": "The identifier of the `DBProxy` that is associated with the `DBProxyTargetGroup` .",
"TargetGroupName": "The identifier for the target group.\n\n> Currently, this property must be set to `default` ."
},
"AWS::RDS::DBProxyTargetGroup ConnectionPoolConfigurationInfoFormat": {
- "ConnectionBorrowTimeout": "The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions. For an unlimited wait time, specify `0` .\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.",
+ "ConnectionBorrowTimeout": "The number of seconds for a proxy to wait for a connection to become available in the connection pool.
This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.", "InitQuery": "One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2` .\n\nDefault: no initialization query", "MaxConnectionsPercent": "The maximum size of the connection pool for each target in a target group. The value is expressed as a percentage of the `max_connections` setting for the RDS DB instance or Aurora DB cluster used by the target group.\n\nIf you specify `MaxIdleConnectionsPercent` , then you must also include a value for this parameter.\n\nDefault: `10` for RDS for Microsoft SQL Server, and `100` for all other engines\n\nConstraints:\n\n- Must be between 1 and 100.", "MaxIdleConnectionsPercent": "A value that controls how actively the proxy closes idle database connections in the connection pool. The value is expressed as a percentage of the `max_connections` setting for the RDS DB instance or Aurora DB cluster used by the target group. With a high value, the proxy leaves a high percentage of idle database connections open. A low value causes the proxy to close more idle connections and return them to the database.\n\nIf you specify this parameter, then you must also include a value for `MaxConnectionsPercent` .\n\nDefault: The default value is half of the value of `MaxConnectionsPercent` . For example, if `MaxConnectionsPercent` is 80, then the default value of `MaxIdleConnectionsPercent` is 40. If the value of `MaxConnectionsPercent` isn't specified, then for SQL Server, `MaxIdleConnectionsPercent` is `5` , and for all other engines, the default is `50` .\n\nConstraints:\n\n- Must be between 0 and the value of `MaxConnectionsPercent` .", @@ -38036,9 +39383,9 @@ }, "AWS::RDS::DBSecurityGroup": { "DBSecurityGroupIngress": "Ingress rules to be applied to the DB security group.", - "EC2VpcId": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "EC2VpcId": "The identifier of an Amazon virtual private cloud (VPC). This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "GroupDescription": "Provides the description of the DB security group.", - "Tags": "An optional array of key-value pairs to apply to this DB security group." + "Tags": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* ." 
}, "AWS::RDS::DBSecurityGroup Ingress": { "CIDRIP": "The IP range to authorize.", @@ -38059,9 +39406,9 @@ }, "AWS::RDS::DBSubnetGroup": { "DBSubnetGroupDescription": "The description for the DB subnet group.", - "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "DBSubnetGroupName": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "SubnetIds": "The EC2 Subnet IDs for the DB subnet group.", - "Tags": "An optional array of key-value pairs to apply to this DB subnet group." + "Tags": "Tags to assign to the DB subnet group." }, "AWS::RDS::DBSubnetGroup Tag": { "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", @@ -38071,8 +39418,8 @@ "Enabled": "Specifies whether to activate the subscription. If the event notification subscription isn't activated, the subscription is created but not active.", "EventCategories": "A list of event categories for a particular source type ( `SourceType` ) that you want to subscribe to. You can see a list of the categories for a given source type in the \"Amazon RDS event categories and event messages\" section of the [*Amazon RDS User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.Messages.html) or the [*Amazon Aurora User Guide*](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Events.Messages.html) . You can also see this list by using the `DescribeEventCategories` operation.", "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic created for event notification. SNS automatically creates the ARN when you create a topic and subscribe to it.\n\n> RDS doesn't support FIFO (first in, first out) topics. For more information, see [Message ordering and deduplication (FIFO topics)](https://docs.aws.amazon.com/sns/latest/dg/sns-fifo-topics.html) in the *Amazon Simple Notification Service Developer Guide* .", - "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. 
It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", - "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "SourceIds": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", + "SourceType": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "SubscriptionName": "The name of the subscription.\n\nConstraints: The name must be less than 255 characters.", "Tags": "An optional array of key-value pairs to apply to this subscription." }, @@ -38087,7 +39434,12 @@ "EngineVersion": "The engine version to use for this global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the engine version of the source DB cluster.", "GlobalClusterIdentifier": "The cluster identifier for this global database cluster. 
This parameter is stored as a lowercase string.",
"SourceDBClusterIdentifier": "The Amazon Resource Name (ARN) to use as the primary cluster of the global database.\n\nIf you provide a value for this parameter, don't specify values for the following settings because Amazon Aurora uses the values from the specified source DB cluster:\n\n- `DatabaseName`\n- `Engine`\n- `EngineVersion`\n- `StorageEncrypted`",
- "StorageEncrypted": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster."
+ "StorageEncrypted": "Specifies whether to enable storage encryption for the new global database cluster.\n\nConstraints:\n\n- Can't be specified if `SourceDBClusterIdentifier` is specified. In this case, Amazon Aurora uses the setting from the source DB cluster.",
+ "Tags": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* ."
+ },
+ "AWS::RDS::GlobalCluster Tag": {
+ "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").",
+ "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\")."
},
"AWS::RDS::Integration": {
"AdditionalEncryptionContext": "An optional set of non-secret key\u2013value pairs that contains additional contextual information about the data. For more information, see [Encryption context](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) in the *AWS Key Management Service Developer Guide* .\n\nYou can only include this parameter if you specify the `KMSKeyId` parameter.",
"IntegrationName": "The name of the integration.",
"KMSKeyId": "The AWS Key Management System ( AWS KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, RDS uses a default AWS owned key.",
"SourceArn": "The Amazon Resource Name (ARN) of the database to use as the source for replication.",
- "Tags": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .",
+ "Tags": "An optional array of key-value pairs to apply to this integration.",
"TargetArn": "The ARN of the Redshift data warehouse to use as the target for replication."
}, "AWS::RDS::Integration Tag": { @@ -38106,18 +39458,18 @@ "AWS::RDS::OptionGroup": { "EngineName": "Specifies the name of the engine that this option group should be associated with.\n\nValid Values:\n\n- `mariadb`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "MajorEngineVersion": "Specifies the major version of the engine that this option group should be associated with.", - "OptionConfigurations": "A list of options and the settings for each option.", + "OptionConfigurations": "A list of all available options for an option group.", "OptionGroupDescription": "The description of the option group.", "OptionGroupName": "The name of the option group to be created.\n\nConstraints:\n\n- Must be 1 to 255 letters, numbers, or hyphens\n- First character must be a letter\n- Can't end with a hyphen or contain two consecutive hyphens\n\nExample: `myoptiongroup`\n\nIf you don't specify a value for `OptionGroupName` property, a name is automatically created for the option group.\n\n> This value is stored as a lowercase string.", - "Tags": "An optional array of key-value pairs to apply to this option group." + "Tags": "Tags to assign to the option group." }, "AWS::RDS::OptionGroup OptionConfiguration": { - "DBSecurityGroupMemberships": "A list of DBSecurityGroupMembership name strings used for this option.", + "DBSecurityGroupMemberships": "A list of DB security groups used for this option.", "OptionName": "The configuration of options to include in a group.", "OptionSettings": "The option settings to include in an option group.", "OptionVersion": "The version for the option.", "Port": "The optional port for the option.", - "VpcSecurityGroupMemberships": "A list of VpcSecurityGroupMembership name strings used for this option." + "VpcSecurityGroupMemberships": "A list of VPC security group names used for this option." }, "AWS::RDS::OptionGroup OptionSetting": { "Name": "The name of the option that has settings that you can set.", @@ -38228,6 +39580,8 @@ }, "AWS::Redshift::Cluster LoggingProperties": { "BucketName": "The name of an existing S3 bucket where the log files are to be stored.\n\nConstraints:\n\n- Must be in the same region as the cluster\n- The cluster must have read bucket and put object permissions", + "LogDestinationType": "The log destination type. An enum with possible values of `s3` and `cloudwatch` .", + "LogExports": "The collection of exported log types. Possible values are `connectionlog` , `useractivitylog` , and `userlog` .", "S3KeyPrefix": "The prefix applied to the log file names.\n\nConstraints:\n\n- Cannot exceed 512 characters\n- Cannot contain spaces( ), double quotes (\"), single quotes ('), a backslash (\\), or control characters. The hexadecimal codes for invalid characters are:\n\n- x00 to x20\n- x22\n- x27\n- x5c\n- x7f or larger" }, "AWS::Redshift::Cluster Tag": { @@ -38753,6 +40107,7 @@ "Value": "The tag value." }, "AWS::RolesAnywhere::Profile": { + "AcceptRoleSessionName": "Used to determine if a custom role session name will be accepted in a temporary credential request.", "AttributeMappings": "A mapping applied to the authenticating end-entity certificate.", "DurationSeconds": "The number of seconds vended session credentials will be valid for", "Enabled": "The enabled status of the resource.", @@ -39186,6 +40541,7 @@ "ResourceId": "The ID of the Amazon VPC that is associated with the query logging configuration." 
}, "AWS::Route53Resolver::ResolverRule": { + "DelegationRecord": "", "DomainName": "DNS queries for this domain name are forwarded to the IP addresses that are specified in `TargetIps` . If a query matches multiple Resolver rules (example.com and www.example.com), the query is routed using the Resolver rule that contains the most specific domain name (www.example.com).", "Name": "The name for the Resolver rule, which you specified when you created the Resolver rule.", "ResolverEndpointId": "The ID of the endpoint that the rule is associated with.", @@ -39201,7 +40557,7 @@ "Ip": "One IPv4 address that you want to forward DNS queries to.", "Ipv6": "One IPv6 address that you want to forward DNS queries to.", "Port": "The port at `Ip` that you want to forward DNS queries to.", - "Protocol": "The protocols for the Resolver endpoints. DoH-FIPS is applicable for inbound endpoints only.\n\nFor an inbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 and DoH-FIPS in combination.\n- Do53 alone.\n- DoH alone.\n- DoH-FIPS alone.\n- None, which is treated as Do53.\n\nFor an outbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 alone.\n- DoH alone.\n- None, which is treated as Do53." + "Protocol": "The protocols for the target address. The protocol you choose needs to be supported by the outbound endpoint of the Resolver rule." }, "AWS::Route53Resolver::ResolverRuleAssociation": { "Name": "The name of an association between a Resolver rule and a VPC.", @@ -39281,7 +40637,7 @@ "PublicAccessBlockConfiguration": "Configuration that defines how Amazon S3 handles public access.", "ReplicationConfiguration": "Configuration for replicating objects in an S3 bucket. To enable replication, you must also enable versioning by using the `VersioningConfiguration` property.\n\nAmazon S3 can store replicated objects in a single destination bucket or multiple destination buckets. The destination bucket or buckets must already exist.", "Tags": "An arbitrary set of tags (key-value pairs) for this S3 bucket.", - "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "VersioningConfiguration": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "WebsiteConfiguration": "Information used to configure the bucket as a static website. For more information, see [Hosting Websites on Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) ." }, "AWS::S3::Bucket AbortIncompleteMultipartUpload": { @@ -39363,7 +40719,8 @@ "Function": "The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 invokes when the specified event type occurs." }, "AWS::S3::Bucket LifecycleConfiguration": { - "Rules": "A lifecycle rule for individual objects in an Amazon S3 bucket." 
+ "Rules": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", + "TransitionDefaultMinimumObjectSize": "Indicates which default minimum object size behavior is applied to the lifecycle configuration.\n\n- `all_storage_classes_128K` - Objects smaller than 128 KB will not transition to any storage class by default.\n- `varies_by_storage_class` - Objects smaller than 128 KB will transition to Glacier Flexible Retrieval or Glacier Deep Archive storage classes. By default, all other storage classes will prevent transitions smaller than 128 KB.\n\nTo customize the minimum object size for any transition you can add a filter that specifies a custom `ObjectSizeGreaterThan` or `ObjectSizeLessThan` in the body of your transition rule. Custom filters always take precedence over the default transition behavior." }, "AWS::S3::Bucket LoggingConfiguration": { "DestinationBucketName": "The name of the bucket where Amazon S3 should store server access log files. You can store log files in any bucket that you own. By default, logs are stored in the bucket where the `LoggingConfiguration` property is defined.", @@ -39508,8 +40865,8 @@ "Rules": "A list of containers for the key-value pair that defines the criteria for the filter rule." }, "AWS::S3::Bucket ServerSideEncryptionByDefault": { - "KMSMasterKeyID": "AWS Key Management Service (KMS) customer AWS KMS key ID to use for the default encryption. This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n\nIf you are using encryption with cross-account or AWS service operations you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> Amazon S3 only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", - "SSEAlgorithm": "Server-side encryption algorithm to use for the default encryption." + "KMSMasterKeyID": "AWS Key Management Service (KMS) customer managed key ID to use for the default encryption.\n\n> - *General purpose buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n> - *Directory buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` . \n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you are using encryption with cross-account or AWS service operations, you must use a fully qualified KMS key ARN. 
For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> - *General purpose buckets* - If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then AWS KMS resolves the key within the requester\u2019s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n> - *Directory buckets* - When you specify an [AWS KMS customer managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. > Amazon S3 only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", + "SSEAlgorithm": "Server-side encryption algorithm to use for the default encryption.\n\n> For directory buckets, there are only two supported values for server-side encryption: `AES256` and `aws:kms` ." }, "AWS::S3::Bucket ServerSideEncryptionRule": { "BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects are not affected. Setting the `BucketKeyEnabled` element to `true` causes Amazon S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled.\n\nFor more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the *Amazon S3 User Guide* .", @@ -39724,10 +41081,22 @@ "PolicyDocument": "A policy document containing permissions to add to the specified bucket. In IAM, you must provide policy documents in JSON format. However, in CloudFormation you can provide the policy in JSON or YAML format because CloudFormation converts YAML to JSON before submitting it to IAM. For more information, see the AWS::IAM::Policy [PolicyDocument](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-policy.html#cfn-iam-policy-policydocument) resource description in this guide and [Policies and Permissions in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html) in the *Amazon S3 User Guide* ." }, "AWS::S3Express::DirectoryBucket": { - "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*DOC-EXAMPLE-BUCKET* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. 
If you need to replace the resource, specify a new name.", + "BucketEncryption": "Specifies default encryption for a bucket using server-side encryption with Amazon S3 managed keys (SSE-S3) or AWS KMS keys (SSE-KMS). For information about default encryption for directory buckets, see [Setting and monitoring default encryption for directory buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html) in the *Amazon S3 User Guide* .", + "BucketName": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "DataRedundancy": "The number of Availability Zones used for redundancy for the bucket.", "LocationName": "The name of the location where the bucket will be created.\n\nFor directory buckets, the name of the location is the AZ ID of the Availability Zone where the bucket will be created. An example AZ ID value is `usw2-az1` ." }, + "AWS::S3Express::DirectoryBucket BucketEncryption": { + "ServerSideEncryptionConfiguration": "Specifies the default server-side-encryption configuration." + }, + "AWS::S3Express::DirectoryBucket ServerSideEncryptionByDefault": { + "KMSMasterKeyID": "AWS Key Management Service (KMS) customer managed key ID to use for the default encryption. This parameter is allowed only if `SSEAlgorithm` is set to `aws:kms` .\n\nYou can specify this parameter with the key ID or the Amazon Resource Name (ARN) of the KMS key. You can\u2019t use the key alias of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n\nIf you are using encryption with cross-account or AWS service operations, you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-bucket-encryption.html#s3-express-bucket-encryption-update-bucket-policy) .\n\n> Your SSE-KMS configuration can only support 1 [customer managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) per directory bucket for the lifetime of the bucket. [AWS managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk) ( `aws/s3` ) isn't supported. Also, after you specify a customer managed key for SSE-KMS and upload objects with this configuration, you can't override the customer managed key for your SSE-KMS configuration. To use a new customer managed key for your data, we recommend copying your existing objects to a new directory bucket with a new customer managed key. > Amazon S3 only supports symmetric encryption KMS keys.
For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", + "SSEAlgorithm": "Server-side encryption algorithm to use for the default encryption.\n\n> For directory buckets, there are only two supported values for server-side encryption: `AES256` and `aws:kms` ." + }, + "AWS::S3Express::DirectoryBucket ServerSideEncryptionRule": { + "BucketKeyEnabled": "Specifies whether Amazon S3 should use an S3 Bucket Key with server-side encryption using KMS (SSE-KMS) for new objects in the bucket. S3 Bucket Keys are always enabled for `GET` and `PUT` operations on a directory bucket and can\u2019t be disabled. You can only set the `BucketKeyEnabled` element to `true` .\n\nS3 Bucket Keys aren't supported when you copy SSE-KMS encrypted objects from general purpose buckets to directory buckets, from directory buckets to general purpose buckets, or between directory buckets, through [CopyObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) , [UploadPartCopy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) , [the Copy operation in Batch Operations](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-Batch-Ops) , or [the import jobs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/create-import-job) . In this case, Amazon S3 makes a call to AWS KMS every time a copy request is made for a KMS-encrypted object.\n\nFor more information, see [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-UsingKMSEncryption.html#s3-express-sse-kms-bucket-keys) in the *Amazon S3 User Guide* .", + "ServerSideEncryptionByDefault": "Specifies the default server-side encryption to apply to new objects in the bucket. If a PUT Object request doesn't specify any server-side encryption, this default encryption will be applied." + }, "AWS::S3ObjectLambda::AccessPoint": { "Name": "The name of this access point.", "ObjectLambdaConfiguration": "A configuration used when creating an Object Lambda Access Point." @@ -40091,7 +41460,8 @@ "Values": "The string(s) to be evaluated in a string condition expression. For all operators, except for NOT_EQUALS, if multiple values are given, the values are processed as an OR. That is, if any of the values match the email's string using the given operator, the condition is deemed to match. However, for NOT_EQUALS, the condition is only deemed to match if none of the given strings match the email's string." }, "AWS::SES::MailManagerRuleSet RuleStringToEvaluate": { - "Attribute": "The email attribute to evaluate in a string condition expression." + "Attribute": "The email attribute to evaluate in a string condition expression.", + "MimeHeaderAttribute": "The email MIME X-Header attribute to evaluate in a string condition expression." }, "AWS::SES::MailManagerRuleSet RuleVerdictExpression": { "Evaluate": "The verdict to evaluate in a verdict condition expression.", @@ -40224,7 +41594,8 @@ }, "AWS::SES::ReceiptRule S3Action": { "BucketName": "The name of the Amazon S3 bucket for incoming email.", - "KmsKeyArn": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket.
You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "IamRoleArn": "The ARN of the IAM role to be used by Amazon Simple Email Service while writing to the Amazon S3 bucket, optionally encrypting your mail via the provided customer managed key, and publishing to the Amazon SNS topic. This role should have access to the following APIs:\n\n- `s3:PutObject` , `kms:Encrypt` and `kms:GenerateDataKey` for the given Amazon S3 bucket.\n- `kms:GenerateDataKey` for the given AWS KMS customer managed key.\n- `sns:Publish` for the given Amazon SNS topic.\n\n> If an IAM role ARN is provided, the role (and only the role) is used to access all the given resources (Amazon S3 bucket, AWS KMS customer managed key and Amazon SNS topic). Therefore, setting up individual resource access permissions is not required.", + "KmsKeyArn": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . 
If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "ObjectKeyPrefix": "The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory name that enables you to store similar data under the same directory in a bucket.", "TopicArn": "The ARN of the Amazon SNS topic to notify when the message is saved to the Amazon S3 bucket. You can find the ARN of a topic by using the [ListTopics](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html) operation in Amazon SNS.\n\nFor more information about Amazon SNS topics, see the [Amazon SNS Developer Guide](https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html) ." }, @@ -40276,8 +41647,8 @@ "TopicArn": "The ARN of the topic to subscribe to." }, "AWS::SNS::Topic": { - "ArchivePolicy": "The archive policy determines the number of days Amazon SNS retains messages. You can set a retention period from 1 to 365 days.", - "ContentBasedDeduplication": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "ArchivePolicy": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days. This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", + "ContentBasedDeduplication": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. 
If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "DataProtectionPolicy": "The body of the policy document you want to use for this topic.\n\nYou can only add one policy per topic.\n\nThe policy must be in JSON string format.\n\nLength Constraints: Maximum length of 30,720.", "DeliveryStatusLogging": "The `DeliveryStatusLogging` configuration enables you to log the delivery status of messages sent from your Amazon SNS topic to subscribed endpoints with the following supported delivery protocols:\n\n- HTTP\n- Amazon Kinesis Data Firehose\n- AWS Lambda\n- Platform application endpoint\n- Amazon Simple Queue Service\n\nOnce configured, log entries are sent to Amazon CloudWatch Logs.", "DisplayName": "The display name to use for an Amazon SNS topic with SMS subscriptions. The display name must be maximum 100 characters long, including hyphens (-), underscores (_), spaces, and tabs.", @@ -40315,13 +41686,13 @@ "ContentBasedDeduplication": "For first-in-first-out (FIFO) queues, specifies whether to enable content-based deduplication. During the deduplication interval, Amazon SQS treats messages that are sent with identical content as duplicates and delivers only one copy of the message. For more information, see the `ContentBasedDeduplication` attribute for the `[CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html)` action in the *Amazon SQS API Reference* .", "DeduplicationScope": "For high throughput for FIFO queues, specifies whether message deduplication occurs at the message group or queue level. Valid values are `messageGroup` and `queue` .\n\nTo enable high throughput for a FIFO queue, set this attribute to `messageGroup` *and* set the `FifoThroughputLimit` attribute to `perMessageGroupId` . If you set these attributes to anything other than these values, normal throughput is in effect and deduplication occurs as specified. For more information, see [High throughput for FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) and [Quotas related to messages](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) in the *Amazon SQS Developer Guide* .", "DelaySeconds": "The time in seconds for which the delivery of all messages in the queue is delayed. You can specify an integer value of `0` to `900` (15 minutes). The default value is `0` .", - "FifoQueue": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .", + "FifoQueue": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. 
For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Amazon SQS Developer Guide* .", "FifoThroughputLimit": "For high throughput for FIFO queues, specifies whether the FIFO queue throughput quota applies to the entire queue or per message group. Valid values are `perQueue` and `perMessageGroupId` .\n\nTo enable high throughput for a FIFO queue, set this attribute to `perMessageGroupId` *and* set the `DeduplicationScope` attribute to `messageGroup` . If you set these attributes to anything other than these values, normal throughput is in effect and deduplication occurs as specified. For more information, see [High throughput for FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/high-throughput-fifo.html) and [Quotas related to messages](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) in the *Amazon SQS Developer Guide* .", "KmsDataKeyReusePeriodSeconds": "The length of time in seconds for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. The value must be an integer between 60 (1 minute) and 86,400 (24 hours). The default is 300 (5 minutes).\n\n> A shorter time period provides better security, but results in more calls to AWS KMS , which might incur charges after Free Tier. For more information, see [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work) in the *Amazon SQS Developer Guide* .", - "KmsMasterKeyId": "The ID of an AWS Key Management Service (KMS) for Amazon SQS , or a custom KMS. To use the AWS managed KMS for Amazon SQS , specify a (default) alias ARN, alias name (e.g. `alias/aws/sqs` ), key ARN, or key ID. For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [AWS Key Management Service Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper", + "KmsMasterKeyId": "The ID of an AWS Key Management Service (KMS) key for Amazon SQS , or a custom KMS key. To use the AWS managed KMS key for Amazon SQS , specify a (default) alias ARN, alias name (for example `alias/aws/sqs` ), key ARN, or key ID.
For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [Security best practices for AWS Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *AWS Key Management Service Developer Guide*", "MaximumMessageSize": "The limit of how many bytes that a message can contain before Amazon SQS rejects it. You can specify an integer value from `1,024` bytes (1 KiB) to `262,144` bytes (256 KiB). The default value is `262,144` (256 KiB).", "MessageRetentionPeriod": "The number of seconds that Amazon SQS retains a message. You can specify an integer value from `60` seconds (1 minute) to `1,209,600` seconds (14 days). The default value is `345,600` seconds (4 days).", - "QueueName": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the `.fifo` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *AWS CloudFormation User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", + "QueueName": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the `.fifo` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Amazon SQS Developer Guide* .\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *AWS CloudFormation User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "ReceiveMessageWaitTimeSeconds": "Specifies the duration, in seconds, that the ReceiveMessage action call waits until a message is in the queue in order to include it in the response, rather than returning an empty response if a message isn't yet available. You can specify an integer from 1 to 20. Short polling is used as the default or when you specify 0 for this property. 
For more information, see [Consuming messages using long polling](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-short-and-long-polling.html#sqs-long-polling) in the *Amazon SQS Developer Guide* .", "RedriveAllowPolicy": "The string that includes the parameters for the permissions for the dead-letter queue redrive permission and which source queues can specify dead-letter queues as a JSON object. The parameters are as follows:\n\n- `redrivePermission` : The permission type that defines which source queues can specify the current queue as the dead-letter queue. Valid values are:\n\n- `allowAll` : (Default) Any source queues in this AWS account in the same Region can specify this queue as the dead-letter queue.\n- `denyAll` : No source queues can specify this queue as the dead-letter queue.\n- `byQueue` : Only queues specified by the `sourceQueueArns` parameter can specify this queue as the dead-letter queue.\n- `sourceQueueArns` : The Amazon Resource Names (ARN)s of the source queues that can specify this queue as the dead-letter queue and redrive messages. You can specify this parameter only when the `redrivePermission` parameter is set to `byQueue` . You can specify up to 10 source queue ARNs. To allow more than 10 source queues to specify dead-letter queues, set the `redrivePermission` parameter to `allowAll` .", "RedrivePolicy": "The string that includes the parameters for the dead-letter queue functionality of the source queue as a JSON object. The parameters are as follows:\n\n- `deadLetterTargetArn` : The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon SQS moves messages after the value of `maxReceiveCount` is exceeded.\n- `maxReceiveCount` : The number of times a message is received by a consumer of the source queue before being moved to the dead-letter queue. When the `ReceiveCount` for a message exceeds the `maxReceiveCount` for a queue, Amazon SQS moves the message to the dead-letter-queue.\n\n> The dead-letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead-letter queue of a standard queue must also be a standard queue. \n\n*JSON*\n\n`{ \"deadLetterTargetArn\" : *String* , \"maxReceiveCount\" : *Integer* }`\n\n*YAML*\n\n`deadLetterTargetArn : *String*`\n\n`maxReceiveCount : *Integer*`", @@ -40387,7 +41758,7 @@ "AWS::SSM::Document AttachmentsSource": { "Key": "The key of a key-value pair that identifies the location of an attachment to a document.", "Name": "The name of the document attachment file.", - "Values": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. 
For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`" + "Values": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`" }, "AWS::SSM::Document DocumentRequires": { "Name": "The name of the required SSM document. The name can be an Amazon Resource Name (ARN).", @@ -40434,7 +41805,7 @@ "MaxErrors": "The maximum number of errors allowed before this task stops being scheduled.\n\n> Although this element is listed as \"Required: No\", a value can be omitted only when you are registering or updating a [targetless task](https://docs.aws.amazon.com/systems-manager/latest/userguide/maintenance-windows-targetless-tasks.html) You must provide a value in all other cases.\n> \n> For maintenance window tasks without a target specified, you can't supply a value for this option. Instead, the system inserts a placeholder value of `1` . This value doesn't affect the running of your task.", "Name": "The task name.", "Priority": "The priority of the task in the maintenance window. The lower the number, the higher the priority. Tasks that have the same priority are scheduled in parallel.", - "ServiceRoleArn": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "ServiceRoleArn": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. 
The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "Targets": "The targets, either instances or window target IDs.\n\n- Specify instances using `Key=InstanceIds,Values= *instanceid1* , *instanceid2*` .\n- Specify window target IDs using `Key=WindowTargetIds,Values= *window-target-id-1* , *window-target-id-2*` .", "TaskArn": "The resource that the task uses during execution.\n\nFor `RUN_COMMAND` and `AUTOMATION` task types, `TaskArn` is the SSM document name or Amazon Resource Name (ARN).\n\nFor `LAMBDA` tasks, `TaskArn` is the function name or ARN.\n\nFor `STEP_FUNCTIONS` tasks, `TaskArn` is the state machine ARN.", "TaskInvocationParameters": "The parameters to pass to the task when it runs. Populate only the fields that match the task type. All other fields should be empty.\n\n> When you update a maintenance window task that has options specified in `TaskInvocationParameters` , you must provide again all the `TaskInvocationParameters` values that you want to retain. The values you do not specify again are removed. For example, suppose that when you registered a Run Command task, you specified `TaskInvocationParameters` values for `Comment` , `NotificationConfig` , and `OutputS3BucketName` . If you update the maintenance window task and specify only a different `OutputS3BucketName` value, the values for `Comment` and `NotificationConfig` are removed.", @@ -40470,7 +41841,7 @@ "OutputS3BucketName": "The name of the Amazon Simple Storage Service (Amazon S3) bucket.", "OutputS3KeyPrefix": "The S3 bucket subfolder.", "Parameters": "The parameters for the `RUN_COMMAND` task execution.\n\nThe supported parameters are the same as those for the `SendCommand` API call. For more information, see [SendCommand](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_SendCommand.html) in the *AWS Systems Manager API Reference* .", - "ServiceRoleArn": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "ServiceRoleArn": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account.
If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "TimeoutSeconds": "If this time is reached and the command hasn't already started running, it doesn't run." }, "AWS::SSM::MaintenanceWindowTask MaintenanceWindowStepFunctionsParameters": { @@ -40505,16 +41876,16 @@ }, "AWS::SSM::PatchBaseline": { "ApprovalRules": "A set of rules used to include patches in the baseline.", - "ApprovedPatches": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "ApprovedPatches": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", "ApprovedPatchesComplianceLevel": "Defines the compliance level for approved patches. When an approved patch is reported as missing, this value describes the severity of the compliance violation. The default value is `UNSPECIFIED` .", "ApprovedPatchesEnableNonSecurity": "Indicates whether the list of approved patches includes non-security updates that should be applied to the managed nodes. The default value is `false` . Applies to Linux managed nodes only.", "Description": "A description of the patch baseline.", - "GlobalFilters": "A set of global filters used to include patches in the baseline.", + "GlobalFilters": "A set of global filters used to include patches in the baseline.\n\n> The `GlobalFilters` parameter can be configured only by using the AWS CLI or an AWS SDK. It can't be configured from the Patch Manager console, and its value isn't displayed in the console.", "Name": "The name of the patch baseline.", "OperatingSystem": "Defines the operating system the patch baseline applies to. The default value is `WINDOWS` .", "PatchGroups": "The name of the patch group to be registered with the patch baseline.", - "RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", - "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package.
It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "RejectedPatches": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "RejectedPatchesAction": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "Sources": "Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.", "Tags": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a patch baseline to identify the severity level of patches it specifies and the operating system family it applies to." }, @@ -40531,8 +41902,8 @@ "Products": "The specific operating system versions a patch repository applies to, such as \"Ubuntu16.04\", \"RedhatEnterpriseLinux7.2\" or \"Suse12.7\". For lists of supported product values, see [PatchFilter](https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_PatchFilter.html) in the *AWS Systems Manager API Reference* ." }, "AWS::SSM::PatchBaseline Rule": { - "ApproveAfterDays": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", - "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.
Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2021-12-31` .", + "ApproveAfterDays": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", + "ApproveUntilDate": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically.\n\nEnter dates in the format `YYYY-MM-DD` . For example, `2024-12-31` .\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", "ComplianceLevel": "A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: `UNSPECIFIED` , `CRITICAL` , `HIGH` , `MEDIUM` , `LOW` , and `INFORMATIONAL` .", "EnableNonSecurity": "For managed nodes identified by the approval rule filters, enables a patch baseline to apply non-security updates available in the specified repository. The default value is `false` . Applies to Linux managed nodes only.", "PatchFilterGroup": "The patch filter group that defines the criteria for the rule." @@ -40740,6 +42111,27 @@ "Key": "The tag key.", "Value": "The tag value." }, + "AWS::SSMQuickSetup::ConfigurationManager": { + "ConfigurationDefinitions": "The definition of the Quick Setup configuration that the configuration manager deploys.", + "Description": "The description of the configuration.", + "Name": "The name of the configuration.", + "Tags": "Key-value pairs of metadata to assign to the configuration manager." + }, + "AWS::SSMQuickSetup::ConfigurationManager ConfigurationDefinition": { + "LocalDeploymentAdministrationRoleArn": "The ARN of the IAM role used to administrate local configuration deployments.", + "LocalDeploymentExecutionRoleName": "The name of the IAM role used to deploy local configurations.", + "Parameters": "The parameters for the configuration definition type. Parameters for configuration definitions vary based on the configuration type.
The following tables outline the parameters for each configuration type.\n\n- **OpsCenter (Type: AWS QuickSetupType-SSMOpsCenter)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Scheduler (Type: AWS QuickSetupType-Scheduler)** - - `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target.\n- `ICalendarString`\n\n- Description: (Required) An iCalendar formatted string containing the schedule you want Change Manager to use.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Default Host Management Configuration (Type: AWS QuickSetupType-DHMC)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Resource Explorer (Type: AWS QuickSetupType-ResourceExplorer)** - - `SelectedAggregatorRegion`\n\n- Description: (Required) The AWS Region where you want to create the aggregator index.\n- `ReplaceExistingAggregator`\n\n- Description: (Required) A boolean value that determines whether to demote an existing aggregator if it is in a Region that differs from the value you specify for the `SelectedAggregatorRegion` .\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Change Manager (Type: AWS QuickSetupType-SSMChangeMgr)** - - `DelegatedAccountId`\n\n- Description: (Required) The ID of the delegated administrator account.\n- `JobFunction`\n\n- Description: (Required) The name for the Change Manager job function.\n- `PermissionType`\n\n- Description: (Optional) Specifies whether you want to use default administrator permissions for the job function role, or provide a custom IAM policy. The valid values are `CustomPermissions` and `AdminPermissions` . The default value for the parameter is `CustomPermissions` .\n- `CustomPermissions`\n\n- Description: (Optional) A JSON string containing the IAM policy you want your job function to use.
You must provide a value for this parameter if you specify `CustomPermissions` for the `PermissionType` parameter.\n- `TargetOrganizationalUnits`\n\n- Description: (Required) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **DevOps\u00a0Guru (Type: AWS QuickSetupType-DevOpsGuru)** - - `AnalyseAllResources`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru analyzes all AWS CloudFormation stacks in the account. The default value is \" `false` \".\n- `EnableSnsNotifications`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru sends notifications when an insight is created. The default value is \" `true` \".\n- `EnableSsmOpsItems`\n\n- Description: (Optional) A boolean value that determines whether DevOps\u00a0Guru creates an OpsCenter OpsItem when an insight is created. The default value is \" `true` \".\n- `EnableDriftRemediation`\n\n- Description: (Optional) A boolean value that determines whether a drift remediation schedule is used. The default value is \" `false` \".\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Conformance Packs (Type: AWS QuickSetupType-CFGCPacks)** - - `DelegatedAccountId`\n\n- Description: (Optional) The ID of the delegated administrator account. This parameter is required for Organization deployments.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `none` \".\n- `CPackNames`\n\n- Description: (Required) A comma separated list of AWS Config conformance packs.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **AWS Config Recording (Type: AWS QuickSetupType-CFGRecording)** - - `RecordAllResources`\n\n- Description: (Optional) A boolean value that determines whether all supported resources are recorded. 
The default value is \" `true` \".\n- `ResourceTypesToRecord`\n\n- Description: (Optional) A comma separated list of resource types you want to record.\n- `RecordGlobalResourceTypes`\n\n- Description: (Optional) A boolean value that determines whether global resources are recorded with all resource configurations. The default value is \" `false` \".\n- `GlobalResourceTypesRegion`\n\n- Description: (Optional) Determines the AWS Region where global resources are recorded.\n- `UseCustomBucket`\n\n- Description: (Optional) A boolean value that determines whether a custom Amazon S3 bucket is used for delivery. The default value is \" `false` \".\n- `DeliveryBucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver configuration snapshots and configuration history files to.\n- `DeliveryBucketPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `NotificationOptions`\n\n- Description: (Optional) Determines the notification configuration for the recorder. The valid values are `NoStreaming` , `UseExistingTopic` , and `CreateTopic` . The default value is `NoStreaming` .\n- `CustomDeliveryTopicAccountId`\n\n- Description: (Optional) The ID of the AWS account where the Amazon SNS topic you want to use for notifications resides. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `CustomDeliveryTopicName`\n\n- Description: (Optional) The name of the Amazon SNS topic you want to use for notifications. You must specify a value for this parameter if you use the `UseExistingTopic` notification option.\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(7 days)` , `rate(1 days)` , and `none` . The default value is \" `none` \".\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) The ID of the root of your Organization. This configuration type doesn't currently support choosing specific OUs. The configuration will be deployed to all the OUs in the Organization.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Host Management (Type: AWS QuickSetupType-SSMHostMgmt)** - - `UpdateSSMAgent`\n\n- Description: (Optional) A boolean value that determines whether the SSM Agent is updated on the target instances every 2 weeks. The default value is \" `true` \".\n- `UpdateEc2LaunchAgent`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `false` \".\n- `CollectInventory`\n\n- Description: (Optional) A boolean value that determines whether the EC2 Launch agent is updated on the target instances every month. The default value is \" `true` \".\n- `ScanInstances`\n\n- Description: (Optional) A boolean value that determines whether the target instances are scanned daily for available patches. 
The default value is \" `true` \".\n- `InstallCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is installed on the target instances. The default value is \" `false` \".\n- `UpdateCloudWatchAgent`\n\n- Description: (Optional) A boolean value that determines whether the Amazon CloudWatch agent is updated on the target instances every month. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Optional) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Optional) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Optional) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Distributor (Type: AWS QuickSetupType-Distributor)** - - `PackagesToInstall`\n\n- Description: (Required) A comma separated list of packages you want to install on the target instances. The valid values are `AWSEFSTools` , `AWSCWAgent` , and `AWSEC2LaunchAgent` .\n- `RemediationSchedule`\n\n- Description: (Optional) A rate expression that defines the schedule for drift remediation. The valid values are `rate(30 days)` , `rate(14 days)` , `rate(2 days)` , and `none` . The default value is \" `rate(30 days)` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instances profiles already associated with the target instances. The default value is \" `false` \".\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . 
Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.\n- **Patch Policy (Type: AWS QuickSetupType-PatchPolicy)** - - `PatchPolicyName`\n\n- Description: (Required) A name for the patch policy. The value you provide is applied to target Amazon EC2 instances as a tag.\n- `SelectedPatchBaselines`\n\n- Description: (Required) An array of JSON objects containing the information for the patch baselines to include in your patch policy.\n- `PatchBaselineUseDefault`\n\n- Description: (Optional) A boolean value that determines whether the selected patch baselines are all AWS provided.\n- `ConfigurationOptionsPatchOperation`\n\n- Description: (Optional) Determines whether target instances scan for available patches, or scan and install available patches. The valid values are `Scan` and `ScanAndInstall` . The default value for the parameter is `Scan` .\n- `ConfigurationOptionsScanValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances scan for available patches.\n- `ConfigurationOptionsInstallValue`\n\n- Description: (Optional) A cron expression that is used as the schedule for when instances install available patches.\n- `ConfigurationOptionsScanNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should scan for available patches at the next cron interval. The default value is \" `false` \".\n- `ConfigurationOptionsInstallNextInterval`\n\n- Description: (Optional) A boolean value that determines whether instances should install available patches at the next cron interval. The default value is \" `false` \".\n- `RebootOption`\n\n- Description: (Optional) A boolean value that determines whether instances are rebooted after patches are installed. The default value is \" `false` \".\n- `IsPolicyAttachAllowed`\n\n- Description: (Optional) A boolean value that determines whether Quick Setup attaches policies to instance profiles already associated with the target instances.
The default value is \" `false` \".\n- `OutputLogEnableS3`\n\n- Description: (Optional) A boolean value that determines whether command output logs are sent to Amazon S3.\n- `OutputS3Location`\n\n- Description: (Optional) A JSON string containing information about the Amazon S3 bucket where you want to store the output details of the request.\n\n- `OutputS3BucketRegion`\n\n- Description: (Optional) The AWS Region where the Amazon S3 bucket you want AWS Config to deliver command output to is located.\n- `OutputS3BucketName`\n\n- Description: (Optional) The name of the Amazon S3 bucket you want AWS Config to deliver command output to.\n- `OutputS3KeyPrefix`\n\n- Description: (Optional) The key prefix you want to use in the custom Amazon S3 bucket.\n- `TargetType`\n\n- Description: (Optional) Determines how instances are targeted for local account deployments. Don't specify a value for this parameter if you're deploying to OUs. The valid values are `*` , `InstanceIds` , `ResourceGroups` , and `Tags` . Use `*` to target all instances in the account.\n- `TargetInstances`\n\n- Description: (Optional) A comma separated list of instance IDs. You must provide a value for this parameter if you specify `InstanceIds` for the `TargetType` parameter.\n- `TargetTagKey`\n\n- Description: (Required) The tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `TargetTagValue`\n\n- Description: (Required) The value of the tag key assigned to the instances you want to target. You must provide a value for this parameter if you specify `Tags` for the `TargetType` parameter.\n- `ResourceGroupName`\n\n- Description: (Required) The name of the resource group associated with the instances you want to target. You must provide a value for this parameter if you specify `ResourceGroups` for the `TargetType` parameter.\n- `TargetAccounts`\n\n- Description: (Optional) The ID of the AWS account initiating the configuration deployment. You only need to provide a value for this parameter if you want to deploy the configuration locally. A value must be provided for either `TargetAccounts` or `TargetOrganizationalUnits` .\n- `TargetOrganizationalUnits`\n\n- Description: (Optional) A comma separated list of organizational units (OUs) you want to deploy the configuration to.\n- `TargetRegions`\n\n- Description: (Required) A comma separated list of AWS Regions you want to deploy the configuration to.", + "Type": "The type of the Quick Setup configuration.", + "TypeVersion": "The version of the Quick Setup type used.", + "id": "The ID of the configuration definition." + }, + "AWS::SSMQuickSetup::ConfigurationManager StatusSummary": { + "LastUpdatedAt": "The datetime stamp when the status was last updated.", + "Status": "The current status.", + "StatusDetails": "Details about the status.", + "StatusMessage": "When applicable, returns an informational message relevant to the current status and status type of the status summary object. We don't recommend implementing parsing logic around this value since the messages returned can vary in format.", + "StatusType": "The type of a status summary." 
+ }, "AWS::SSO::Application": { "ApplicationProviderArn": "The ARN of the application provider for this application.", "Description": "The description of the application.", @@ -40827,6 +42219,7 @@ }, "AWS::SageMaker::App ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, @@ -40873,6 +42266,49 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::Cluster": { + "ClusterName": "The name of the SageMaker HyperPod cluster.", + "InstanceGroups": "The instance groups of the SageMaker HyperPod cluster.", + "NodeRecovery": "Specifies whether to enable or disable the automatic node recovery feature of SageMaker HyperPod. Available values are `Automatic` for enabling and `None` for disabling.", + "Orchestrator": "The orchestrator type for the SageMaker HyperPod cluster. Currently, `'eks'` is the only available option.", + "Tags": "A tag object that consists of a key and an optional value, used to manage metadata for SageMaker AWS resources.\n\nYou can add tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and endpoints. For more information on adding tags to SageMaker resources, see [AddTags](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_AddTags.html) .\n\nFor more information on adding metadata to your AWS resources with tagging, see [Tagging AWS resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) . For advice on best practices for managing AWS resources with tagging, see [Tagging Best Practices: Implement an Effective AWS Resource Tagging Strategy](https://d1.awsstatic.com/whitepapers/aws-tagging-best-practices.pdf) .", + "VpcConfig": "Specifies an Amazon Virtual Private Cloud (VPC) that your SageMaker jobs, hosted models, and compute resources have access to. You can control access to and from your resources by configuring a VPC. For more information, see [Give SageMaker Access to Resources in your Amazon VPC](https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html) ." + }, + "AWS::SageMaker::Cluster ClusterEbsVolumeConfig": { + "VolumeSizeInGB": "The size in gigabytes (GB) of the additional EBS volume to be attached to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to `/opt/sagemaker` ."
+ }, + "AWS::SageMaker::Cluster ClusterInstanceGroup": { + "CurrentCount": "The number of instances that are currently in the instance group of a SageMaker HyperPod cluster.", + "ExecutionRole": "The execution role for the instance group to assume.", + "InstanceCount": "The number of instances in an instance group of the SageMaker HyperPod cluster.", + "InstanceGroupName": "The name of the instance group of a SageMaker HyperPod cluster.", + "InstanceStorageConfigs": "The configurations of additional storage specified to the instance group where the instance (node) is launched.", + "InstanceType": "The instance type of the instance group of a SageMaker HyperPod cluster.", + "LifeCycleConfig": "The lifecycle configuration for a SageMaker HyperPod cluster.", + "OnStartDeepHealthChecks": "A flag indicating whether deep health checks should be performed when the HyperPod cluster instance group is created or updated. Deep health checks are comprehensive, invasive tests that validate the health of the underlying hardware and infrastructure components.", + "ThreadsPerCore": "The number of threads per CPU core you specified under `CreateCluster` ." + }, + "AWS::SageMaker::Cluster ClusterInstanceStorageConfig": { + "EbsVolumeConfig": "Defines the configuration for attaching additional Amazon Elastic Block Store (EBS) volumes to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to `/opt/sagemaker` ." + }, + "AWS::SageMaker::Cluster ClusterLifeCycleConfig": { + "OnCreate": "The file name of the entrypoint script of lifecycle scripts under `SourceS3Uri` . This entrypoint script runs during cluster creation.", + "SourceS3Uri": "An Amazon S3 bucket path where your lifecycle scripts are stored.\n\n> Make sure that the S3 bucket path starts with `s3://sagemaker-` . The [IAM role for SageMaker HyperPod](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-prerequisites.html#sagemaker-hyperpod-prerequisites-iam-role-for-hyperpod) has the managed [`AmazonSageMakerClusterInstanceRolePolicy`](https://docs.aws.amazon.com/sagemaker/latest/dg/security-iam-awsmanpol-cluster.html) attached, which allows access to S3 buckets with the specific prefix `sagemaker-` ." + }, + "AWS::SageMaker::Cluster ClusterOrchestratorEksConfig": { + "ClusterArn": "The Amazon Resource Name (ARN) of the Amazon EKS cluster associated with the SageMaker HyperPod cluster." + }, + "AWS::SageMaker::Cluster Orchestrator": { + "Eks": "The configuration of the Amazon EKS orchestrator cluster for the SageMaker HyperPod cluster." + }, + "AWS::SageMaker::Cluster Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, + "AWS::SageMaker::Cluster VpcConfig": { + "SecurityGroupIds": "The VPC security group IDs, in the form `sg-xxxxxxxx` . Specify the security groups for the VPC that is specified in the `Subnets` field.", + "Subnets": "The IDs of the subnets in the VPC to which you want to connect your training job or model. For information about the availability of specific instance types, see [Supported Instance Types and Availability Zones](https://docs.aws.amazon.com/sagemaker/latest/dg/instance-types-az.html) ."
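Because the `AWS::SageMaker::Cluster` property types above nest three levels deep (instance group, storage config, EBS config), a minimal sketch may help show how they fit together. Every name, ARN, and ID below is a placeholder; the `s3://sagemaker-` prefix requirement comes from the `SourceS3Uri` note above:

```yaml
Resources:
  HyperPodCluster:                          # hypothetical logical ID
    Type: AWS::SageMaker::Cluster
    Properties:
      ClusterName: demo-hyperpod
      NodeRecovery: Automatic               # Automatic | None
      InstanceGroups:
        - InstanceGroupName: worker-group
          InstanceType: ml.g5.8xlarge       # placeholder instance type
          InstanceCount: 2
          ExecutionRole: arn:aws:iam::111122223333:role/HyperPodExecutionRole  # placeholder
          LifeCycleConfig:
            OnCreate: on_create.sh          # entrypoint script under SourceS3Uri
            SourceS3Uri: s3://sagemaker-demo-lifecycle/scripts/   # must start with s3://sagemaker-
          InstanceStorageConfigs:
            - EbsVolumeConfig:
                VolumeSizeInGB: 100         # mounted to /opt/sagemaker on each node
      VpcConfig:
        SecurityGroupIds:
          - sg-0123456789abcdef0            # placeholder
        Subnets:
          - subnet-0123456789abcdef0        # placeholder
```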
+ }, "AWS::SageMaker::CodeRepository": { "CodeRepositoryName": "The name of the Git repository.", "GitConfig": "Configuration details for the Git repository, including the URL where it is located and the ARN of the AWS Secrets Manager secret that contains the credentials used to access the repository.", @@ -41028,7 +42464,11 @@ "Tags": "Tags to associated with the Domain. Each tag consists of a key and an optional value. Tag keys must be unique per resource. Tags are searchable using the Search API.\n\nTags that you specify for the Domain are also added to all apps that are launched in the Domain.\n\n*Array members* : Minimum number of 0 items. Maximum number of 50 items.", "VpcId": "The ID of the Amazon Virtual Private Cloud (Amazon VPC) that Studio uses for communication.\n\n*Length Constraints* : Maximum length of 32.\n\n*Pattern* : `[-0-9a-zA-Z]+`" }, + "AWS::SageMaker::Domain AppLifecycleManagement": { + "IdleSettings": "Settings related to idle shutdown of Studio applications." + }, "AWS::SageMaker::Domain CodeEditorAppSettings": { + "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of CodeEditor applications.", "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." @@ -41078,18 +42518,27 @@ "FileSystemId": "The ID of your Amazon EFS file system.", "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below." }, + "AWS::SageMaker::Domain IdleSettings": { + "IdleTimeoutInMinutes": "The time that SageMaker waits after the application becomes idle before shutting it down.", + "LifecycleManagement": "Indicates whether idle shutdown is activated for the application type.", + "MaxIdleTimeoutInMinutes": "The maximum value in minutes that custom idle shutdown can be set to by the user.", + "MinIdleTimeoutInMinutes": "The minimum value in minutes that custom idle shutdown can be set to by the user." + }, "AWS::SageMaker::Domain JupyterLabAppSettings": { + "AppLifecycleManagement": "Indicates whether idle shutdown is activated for JupyterLab applications.", "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. 
If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Domain RSessionAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a RSession app.", @@ -41366,7 +42815,8 @@ "Processor": "", "ProgrammingLang": "", "ReleaseNotes": "", - "VendorGuidance": "" + "VendorGuidance": "", + "Version": "The version number." }, "AWS::SageMaker::InferenceComponent": { "EndpointArn": "The Amazon Resource Name (ARN) of the endpoint that hosts the inference component.", @@ -41471,6 +42921,20 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::MlflowTrackingServer": { + "ArtifactStoreUri": "", + "AutomaticModelRegistration": "", + "MlflowVersion": "", + "RoleArn": "", + "Tags": "", + "TrackingServerName": "", + "TrackingServerSize": "", + "WeeklyMaintenanceWindowStart": "" + }, + "AWS::SageMaker::MlflowTrackingServer Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::Model": { "Containers": "Specifies the containers in the inference pipeline.", "EnableNetworkIsolation": "Isolates the model container. No inbound or outbound network calls can be made to or from the model container.", @@ -41483,7 +42947,7 @@ }, "AWS::SageMaker::Model ContainerDefinition": { "ContainerHostname": "This parameter is ignored for models that contain only a `PrimaryContainer` .\n\nWhen a `ContainerDefinition` is part of an inference pipeline, the value of the parameter uniquely identifies the container for the purposes of logging and metrics. For information, see [Use Logs and Metrics to Monitor an Inference Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipeline-logs-metrics.html) . If you don't specify a value for this parameter for a `ContainerDefinition` that is part of an inference pipeline, a unique name is automatically assigned based on the position of the `ContainerDefinition` in the pipeline.
If you specify a value for the `ContainerHostName` for any `ContainerDefinition` that is part of an inference pipeline, you must specify a value for the `ContainerHostName` parameter of every `ContainerDefinition` in that pipeline.", - "Environment": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "Environment": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "Image": "The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a Docker registry that is accessible from the same VPC that you configure for your endpoint. If you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both `registry/repository[:tag]` and `registry/repository[@digest]` image path formats. For more information, see [Using Your Own Algorithms with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "ImageConfig": "Specifies whether the model container is in Amazon ECR or a private Docker registry accessible from your Amazon Virtual Private Cloud (VPC). For information about storing containers in a private Docker registry, see [Use a Private Docker Registry for Real-Time Inference Containers](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-containers-inference-private.html) .\n\n> The model artifacts in an Amazon S3 bucket and the Docker image for inference container in Amazon EC2 Container Registry must be in the same region as the model or endpoint you are creating.", "InferenceSpecificationName": "The inference specification name in the model package version.", @@ -41493,6 +42957,9 @@ "ModelPackageName": "The name or Amazon Resource Name (ARN) of the model package to use to create the model.", "MultiModelConfig": "Specifies additional configuration for multi-model endpoints." }, + "AWS::SageMaker::Model HubAccessConfig": { + "HubContentArn": "" + }, "AWS::SageMaker::Model ImageConfig": { "RepositoryAccessMode": "Set this to one of the following values:\n\n- `Platform` - The model image is hosted in Amazon ECR.\n- `Vpc` - The model image is hosted in a private Docker registry in your VPC.", "RepositoryAuthConfig": "(Optional) Specifies an authentication configuration for the private docker registry where your model image is hosted. Specify a value for this property only if you specified `Vpc` as the value for the `RepositoryAccessMode` field, and the private Docker registry where the model image is hosted requires authentication." 
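The `ContainerDefinition` and `ImageConfig` entries above encode several constraints (image and artifacts in the same Region, `registry/repository[:tag]` image paths, no secrets in `Environment`). Here is a minimal sketch of how they fit together; the logical ID, role ARN, account, Region, and bucket names are all placeholders:

```yaml
Resources:
  InferenceModel:                           # hypothetical logical ID
    Type: AWS::SageMaker::Model
    Properties:
      ExecutionRoleArn: arn:aws:iam::111122223333:role/SageMakerExecutionRole  # placeholder
      PrimaryContainer:
        Image: 111122223333.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest  # registry/repository[:tag]
        ModelDataUrl: s3://my-model-bucket/model.tar.gz   # single gzip-compressed tar, same Region
        Environment:                        # no sensitive data; 32 KB combined limit applies
          LOG_LEVEL: info
        ImageConfig:
          RepositoryAccessMode: Platform    # Platform (Amazon ECR) | Vpc (private registry)
```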
@@ -41514,6 +42981,7 @@ }, "AWS::SageMaker::Model S3DataSource": { "CompressionType": "", + "HubAccessConfig": "", "ModelAccessConfig": "", "S3DataType": "If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.\n\nIf you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.\n\nIf you choose `AugmentedManifestFile` , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. `AugmentedManifestFile` can only be used if the Channel's input mode is `Pipe` .", "S3Uri": "Depending on the value specified for the `S3DataType` , identifies either a key name prefix or a manifest. For example:\n\n- A key name prefix might look like this: `s3://bucketname/exampleprefix/`\n- A manifest might look like this: `s3://bucketname/example.manifest`\n\nA manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of `S3Uri` . Note that the prefix must be a valid non-empty `S3Uri` that precludes users from specifying a manifest whose individual `S3Uri` is sourced from different S3 buckets.\n\nThe following code example shows a valid manifest format:\n\n`[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},`\n\n`\"relative/path/to/custdata-1\",`\n\n`\"relative/path/custdata-2\",`\n\n`...`\n\n`\"relative/path/custdata-N\"`\n\n`]`\n\nThis JSON is equivalent to the following `S3Uri` list:\n\n`s3://customer_bucket/some/prefix/relative/path/to/custdata-1`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-2`\n\n`...`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-N`\n\nThe complete set of `S3Uri` in this manifest is the input data for the channel for this data source. The object that each `S3Uri` points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.\n\nYour input bucket must be located in same AWS region as your training job." @@ -41891,6 +43359,7 @@ "LastModifiedTime": "The last time the model package was modified.", "MetadataProperties": "Metadata properties of the tracking entity, trial, or trial component.", "ModelApprovalStatus": "The approval status of the model. This can be one of the following values.\n\n- `APPROVED` - The model is approved\n- `REJECTED` - The model is rejected.\n- `PENDING_MANUAL_APPROVAL` - The model is waiting for manual approval.", + "ModelCard": "An Amazon SageMaker Model Card.", "ModelMetrics": "Metrics for the model.", "ModelPackageDescription": "The description of the model package.", "ModelPackageGroupName": "The model group to which the model belongs.", @@ -41898,8 +43367,10 @@ "ModelPackageStatusDetails": "Specifies the validation and image scan statuses of the model package.", "ModelPackageVersion": "The version number of a versioned model.", "SamplePayloadUrl": "The Amazon Simple Storage Service path where the sample payload are stored. 
This path must point to a single gzip compressed tar archive (.tar.gz suffix).", + "SecurityConfig": "", "SkipModelValidation": "Indicates if you want to skip model validation.", "SourceAlgorithmSpecification": "A list of algorithms that were used to create a model package.", + "SourceUri": "The URI of the source for the model package.", "Tags": "A list of the tags associated with the model package. For more information, see [Tagging AWS resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference Guide* .", "Task": "The machine learning task your model package accomplishes. Common machine learning tasks include object detection and image classification.", "ValidationSpecification": "Specifies batch transform jobs that SageMaker runs to validate your model package." @@ -41970,10 +43441,20 @@ "ContentType": "The metric source content type.", "S3Uri": "The S3 URI for the metrics source." }, + "AWS::SageMaker::ModelPackage ModelAccessConfig": { + "AcceptEula": "Specifies agreement to the model end-user license agreement (EULA). The `AcceptEula` value must be explicitly defined as `True` in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model." + }, + "AWS::SageMaker::ModelPackage ModelCard": { + "ModelCardContent": "", + "ModelCardStatus": "The approval status of the model card within your organization. Different organizations might have different criteria for model card review and approval.\n\n- `Draft` : The model card is a work in progress.\n- `PendingReview` : The model card is pending review.\n- `Approved` : The model card is approved.\n- `Archived` : The model card is archived. No more updates should be made to the model card, but it can still be exported." + }, "AWS::SageMaker::ModelPackage ModelDataQuality": { "Constraints": "Data quality constraints for a model.", "Statistics": "Data quality statistics for a model." }, + "AWS::SageMaker::ModelPackage ModelDataSource": { + "S3DataSource": "Specifies the S3 location of ML model data to deploy." + }, "AWS::SageMaker::ModelPackage ModelInput": { "DataInputConfig": "The input configuration object for the model." }, @@ -41990,6 +43471,7 @@ "FrameworkVersion": "The framework version of the Model Package Container Image.", "Image": "The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.\n\nIf you are using your own custom algorithm instead of an algorithm provided by SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both `registry/repository[:tag]` and `registry/repository[@digest]` image path formats. For more information, see [Using Your Own Algorithms with Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) .", "ImageDigest": "An MD5 hash of the training algorithm that identifies the Docker image used for training.", + "ModelDataSource": "Specifies the location of ML model data to deploy during endpoint creation.", "ModelDataUrl": "The Amazon S3 path where the model artifacts, which result from model training, are stored. 
This path must point to a single `gzip` compressed tar archive ( `.tar.gz` suffix).\n\n> The model artifacts must be in an S3 bucket that is in the same region as the model package.", "ModelInput": "A structure with Model Input details.", "NearestModelName": "The name of a pre-trained machine learning benchmarked by Amazon SageMaker Inference Recommender model that matches your model. You can find a list of benchmarked models by calling `ListModelMetadata` ." @@ -42010,6 +43492,15 @@ "S3DataType": "If you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix for model training.\n\nIf you choose `ManifestFile` , `S3Uri` identifies an object that is a manifest file containing a list of object keys that you want SageMaker to use for model training.\n\nIf you choose `AugmentedManifestFile` , S3Uri identifies an object that is an augmented manifest file in JSON lines format. This file contains the data you want to use for model training. `AugmentedManifestFile` can only be used if the Channel's input mode is `Pipe` .", "S3Uri": "Depending on the value specified for the `S3DataType` , identifies either a key name prefix or a manifest. For example:\n\n- A key name prefix might look like this: `s3://bucketname/exampleprefix/`\n- A manifest might look like this: `s3://bucketname/example.manifest`\n\nA manifest is an S3 object which is a JSON file consisting of an array of elements. The first element is a prefix which is followed by one or more suffixes. SageMaker appends the suffix elements to the prefix to get a full set of `S3Uri` . Note that the prefix must be a valid non-empty `S3Uri` that precludes users from specifying a manifest whose individual `S3Uri` is sourced from different S3 buckets.\n\nThe following code example shows a valid manifest format:\n\n`[ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},`\n\n`\"relative/path/to/custdata-1\",`\n\n`\"relative/path/custdata-2\",`\n\n`...`\n\n`\"relative/path/custdata-N\"`\n\n`]`\n\nThis JSON is equivalent to the following `S3Uri` list:\n\n`s3://customer_bucket/some/prefix/relative/path/to/custdata-1`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-2`\n\n`...`\n\n`s3://customer_bucket/some/prefix/relative/path/custdata-N`\n\nThe complete set of `S3Uri` in this manifest is the input data for the channel for this data source. The object that each `S3Uri` points to must be readable by the IAM role that SageMaker uses to perform tasks on your behalf.\n\nYour input bucket must be located in same AWS region as your training job." }, + "AWS::SageMaker::ModelPackage S3ModelDataSource": { + "CompressionType": "Specifies how the ML model data is prepared.\n\nIf you choose `Gzip` and choose `S3Object` as the value of `S3DataType` , `S3Uri` identifies an object that is a gzip-compressed TAR archive. 
SageMaker will attempt to decompress and untar the object during model deployment.\n\nIf you choose `None` and choose `S3Object` as the value of `S3DataType` , `S3Uri` identifies an object that represents an uncompressed ML model to deploy.\n\nIf you choose `None` and choose `S3Prefix` as the value of `S3DataType` , `S3Uri` identifies a key name prefix, under which all objects represent the uncompressed ML model to deploy.\n\nIf you choose `None` , then SageMaker follows the rules below when creating model data files under the `/opt/ml/model` directory for use by your inference code:\n\n- If you choose `S3Object` as the value of `S3DataType` , then SageMaker will split the key of the S3 object referenced by `S3Uri` by slash (/), and use the last part as the filename of the file holding the content of the S3 object.\n- If you choose `S3Prefix` as the value of `S3DataType` , then for each S3 object under the key name prefix referenced by `S3Uri` , SageMaker will trim its key by the prefix, and use the remainder as the path (relative to `/opt/ml/model` ) of the file holding the content of the S3 object. SageMaker will split the remainder by slash (/), using intermediate parts as directory names and the last part as filename of the file holding the content of the S3 object.\n- Do not use any of the following as file names or directory names:\n\n- An empty or blank string\n- A string which contains null bytes\n- A string longer than 255 bytes\n- A single dot ( `.` )\n- A double dot ( `..` )\n- Ambiguous file names will result in model deployment failure. For example, if your uncompressed ML model consists of two S3 objects `s3://mybucket/model/weights` and `s3://mybucket/model/weights/part1` and you specify `s3://mybucket/model/` as the value of `S3Uri` and `S3Prefix` as the value of `S3DataType` , then it will result in name clash between `/opt/ml/model/weights` (a regular file) and `/opt/ml/model/weights/` (a directory).\n- Do not organize the model artifacts in [S3 console using folders](https://docs.aws.amazon.com//AmazonS3/latest/userguide/using-folders.html) . When you create a folder in S3 console, S3 creates a 0-byte object with a key set to the folder name you provide. The key of the 0-byte object ends with a slash (/) which violates SageMaker restrictions on model artifact file names, leading to model deployment failure.", + "ModelAccessConfig": "Specifies the access configuration file for the ML model. You can explicitly accept the model end-user license agreement (EULA) within the `ModelAccessConfig` . You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model.", + "S3DataType": "Specifies the type of ML model data to deploy.\n\nIf you choose `S3Prefix` , `S3Uri` identifies a key name prefix. SageMaker uses all objects that match the specified key name prefix as part of the ML model data to deploy. A valid key name prefix identified by `S3Uri` always ends with a forward slash (/).\n\nIf you choose `S3Object` , `S3Uri` identifies an object that is the ML model data to deploy.", + "S3Uri": "Specifies the S3 path of ML model data to deploy." + }, + "AWS::SageMaker::ModelPackage SecurityConfig": { + "KmsKeyId": "" + }, "AWS::SageMaker::ModelPackage SourceAlgorithm": { "AlgorithmName": "The name of an algorithm that was used to create the model package.
The algorithm must be either an algorithm resource in your SageMaker account or an algorithm in AWS Marketplace that you are subscribed to.", "ModelDataUrl": "The Amazon S3 path where the model artifacts, which result from model training, are stored. This path must point to a single `gzip` compressed tar archive ( `.tar.gz` suffix).\n\n> The model artifacts must be in an S3 bucket that is in the same AWS region as the algorithm." @@ -42409,24 +43900,35 @@ "EbsVolumeSizeInGb": "The size of an EBS storage volume for a space." }, "AWS::SageMaker::Space JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app. If you use the `LifecycleConfigArns` parameter, then this parameter is also required.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::Space OwnershipSettings": { "OwnerUserProfileName": "The user profile who is the owner of the space." }, "AWS::SageMaker::Space ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance." }, + "AWS::SageMaker::Space SpaceAppLifecycleManagement": { + "IdleSettings": "Settings related to idle shutdown of Studio applications."
+ }, "AWS::SageMaker::Space SpaceCodeEditorAppSettings": { + "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of CodeEditor applications in a space.", "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on." }, + "AWS::SageMaker::Space SpaceIdleSettings": { + "IdleTimeoutInMinutes": "The time that SageMaker waits after the application becomes idle before shutting it down." + }, "AWS::SageMaker::Space SpaceJupyterLabAppSettings": { + "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of JupyterLab applications in a space.", "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "DefaultResourceSpec": "Specifies the ARNs of a SageMaker image and SageMaker image version, and the instance type that the version runs on." }, @@ -42449,6 +43951,16 @@ "Key": "The tag key. Tag keys must be unique per resource.", "Value": "The tag value." }, + "AWS::SageMaker::StudioLifecycleConfig": { + "StudioLifecycleConfigAppType": "The App type to which the Lifecycle Configuration is attached.", + "StudioLifecycleConfigContent": "", + "StudioLifecycleConfigName": "The name of the Amazon SageMaker Studio Lifecycle Configuration.", + "Tags": "" + }, + "AWS::SageMaker::StudioLifecycleConfig Tag": { + "Key": "The tag key. Tag keys must be unique per resource.", + "Value": "The tag value." + }, "AWS::SageMaker::UserProfile": { "DomainId": "The domain ID.", "SingleSignOnUserIdentifier": "A specifier for the type of value specified in SingleSignOnUserValue. Currently, the only supported value is \"UserName\". If the Domain's AuthMode is IAM Identity Center , this field is required. If the Domain's AuthMode is not IAM Identity Center , this field cannot be specified.", @@ -42457,7 +43969,11 @@ "UserProfileName": "The user profile name.", "UserSettings": "A collection of settings that apply to users of Amazon SageMaker Studio." }, + "AWS::SageMaker::UserProfile AppLifecycleManagement": { + "IdleSettings": "Settings related to idle shutdown of Studio applications." + }, "AWS::SageMaker::UserProfile CodeEditorAppSettings": { + "AppLifecycleManagement": "Settings that are used to configure and manage the lifecycle of CodeEditor applications.", "CustomImages": "A list of custom SageMaker images that are configured to run as a Code Editor app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the Code Editor app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Code Editor application lifecycle configuration." @@ -42488,18 +44004,27 @@ "FileSystemId": "The ID of your Amazon EFS file system.", "FileSystemPath": "The path to the file system directory that is accessible in Amazon SageMaker Studio. Permitted users can access only this directory and below." }, + "AWS::SageMaker::UserProfile IdleSettings": { + "IdleTimeoutInMinutes": "The time that SageMaker waits after the application becomes idle before shutting it down.", + "LifecycleManagement": "Indicates whether idle shutdown is activated for the application type.", + "MaxIdleTimeoutInMinutes": "The maximum value in minutes that custom idle shutdown can be set to by the user.", + "MinIdleTimeoutInMinutes": "The minimum value in minutes that custom idle shutdown can be set to by the user." 
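The idle-shutdown settings introduced above follow the same `AppLifecycleManagement` -> `IdleSettings` nesting on domains, spaces, and user profiles. A hedged sketch for a user profile follows; the domain ID is a placeholder and the `ENABLED` literal for `LifecycleManagement` is an assumption for "activated" (timeouts are in minutes):

```yaml
Resources:
  DataScienceProfile:                       # hypothetical logical ID
    Type: AWS::SageMaker::UserProfile
    Properties:
      DomainId: d-xxxxxxxxxxxx              # placeholder domain ID
      UserProfileName: data-scientist
      UserSettings:
        JupyterLabAppSettings:
          AppLifecycleManagement:
            IdleSettings:
              LifecycleManagement: ENABLED  # assumed enum value for "activated"
              IdleTimeoutInMinutes: 60      # shut down after an hour idle
              MinIdleTimeoutInMinutes: 30   # lower bound users may choose
              MaxIdleTimeoutInMinutes: 240  # upper bound users may choose
```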
+ }, "AWS::SageMaker::UserProfile JupyterLabAppSettings": { + "AppLifecycleManagement": "Indicates whether idle shutdown is activated for JupyterLab applications.", "CodeRepositories": "A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterLab application.", "CustomImages": "A list of custom SageMaker images that are configured to run as a JupyterLab app.", "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterLab app.", "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the lifecycle configurations attached to the user profile or domain. To remove a lifecycle config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile JupyterServerAppSettings": { - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the JupyterServer app.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the `DefaultResourceSpec` parameter is also required.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile KernelGatewayAppSettings": { "CustomImages": "A list of custom SageMaker images that are configured to run as a KernelGateway app.", - "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed." + "DefaultResourceSpec": "The default instance type and the Amazon Resource Name (ARN) of the default SageMaker image used by the KernelGateway app.\n\n> The Amazon SageMaker Studio UI does not use the default instance type value set here. The default instance type set here is used when Apps are created using the AWS CLI or AWS CloudFormation and the instance type parameter value is not passed.", + "LifecycleConfigArns": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the user profile or domain.\n\n> To remove a Lifecycle Config, you must set `LifecycleConfigArns` to an empty list." }, "AWS::SageMaker::UserProfile RStudioServerProAppSettings": { "AccessStatus": "Indicates whether the current user has access to the `RStudioServerPro` app.", @@ -42507,6 +44032,7 @@ }, "AWS::SageMaker::UserProfile ResourceSpec": { "InstanceType": "The instance type that the image version runs on.\n\n> *JupyterServer apps* only support the `system` value.\n> \n> For *KernelGateway apps* , the `system` value is translated to `ml.t3.medium` . KernelGateway apps also support all other values for available instance types.", + "LifecycleConfigArn": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.", "SageMakerImageArn": "The ARN of the SageMaker image that the image version belongs to.", "SageMakerImageVersionArn": "The ARN of the image version created on the instance."
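Several of the new `LifecycleConfigArns` entries above share one constraint: when the list is set, `DefaultResourceSpec` must be set too, and detaching configs requires an explicit empty list. A sketch of that pairing, with the lifecycle config defined in the same template; the `StudioLifecycleConfigArn` return attribute and the base64 encoding of the script content are assumptions:

```yaml
Resources:
  AutoStopConfig:                           # hypothetical logical ID
    Type: AWS::SageMaker::StudioLifecycleConfig
    Properties:
      StudioLifecycleConfigAppType: JupyterServer
      StudioLifecycleConfigName: auto-stop
      StudioLifecycleConfigContent:
        Fn::Base64: |
          #!/bin/bash
          echo "runs when the app starts"
  LifecycleDemoProfile:
    Type: AWS::SageMaker::UserProfile
    Properties:
      DomainId: d-xxxxxxxxxxxx              # placeholder domain ID
      UserProfileName: lifecycle-demo
      UserSettings:
        JupyterServerAppSettings:
          DefaultResourceSpec:              # required once LifecycleConfigArns is used
            InstanceType: system            # JupyterServer apps only support `system`
          LifecycleConfigArns:              # set to [] to detach all lifecycle configs
            - !GetAtt AutoStopConfig.StudioLifecycleConfigArn  # assumed return attribute
```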
}, @@ -42670,11 +44196,11 @@ "SecretId": "The ARN or name of the secret to attach the resource-based policy.\n\nFor an ARN, we recommend that you specify a complete ARN rather than a partial ARN." }, "AWS::SecretsManager::RotationSchedule": { - "HostedRotationLambda": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .", + "HostedRotationLambda": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .", "RotateImmediatelyOnUpdate": "Specifies whether to rotate the secret immediately or wait until the next scheduled rotation window. The rotation schedule is defined in `RotationRules` .\n\nIf you don't immediately rotate the secret, Secrets Manager tests the rotation configuration by running the [`testSecret` step](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) of the Lambda rotation function. The test creates an `AWSPENDING` version of the secret and then removes it.\n\nIf you don't specify this value, then by default, Secrets Manager rotates the secret immediately.\n\nRotation is an asynchronous process. For more information, see [How rotation works](https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotate-secrets_how.html) .", - "RotationLambdaARN": "The ARN of an existing Lambda rotation function. To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", + "RotationLambdaARN": "The ARN of an existing Lambda rotation function. 
To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", "RotationRules": "A structure that defines the rotation configuration for this secret.", - "SecretId": "The ARN or name of the secret to rotate.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID." + "SecretId": "The ARN or name of the secret to rotate. This is unique for each rotation schedule definition.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID." }, "AWS::SecretsManager::RotationSchedule HostedRotationLambda": { "ExcludeCharacters": "A string of the characters that you don't want in the password.", @@ -42724,9 +44250,9 @@ "Value": "The string value associated with the key of the tag." }, "AWS::SecretsManager::SecretTargetAttachment": { - "SecretId": "The ARN or name of the secret. To reference a secret also created in this template, use the see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", + "SecretId": "The ARN or name of the secret. To reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID. This field is unique for each target attachment definition.", "TargetId": "The ID of the database or cluster.", - "TargetType": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. 
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::RedshiftServerless::Namespace\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster" }, "AWS::SecurityHub::AutomationRule": { "Actions": "One or more actions to update finding fields if a finding matches the conditions specified in `Criteria` .", @@ -42740,7 +44266,7 @@ }, "AWS::SecurityHub::AutomationRule AutomationRulesAction": { "FindingFieldsUpdate": "Specifies that the automation rule action is an update to a finding field.", - "Type": "Specifies that the rule action should update the `Types` finding field. The `Types` finding field classifies findings in the format of namespace/category/classifier. For more information, see [Types taxonomy for ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-type-taxonomy.html) in the *AWS Security Hub User Guide* ." + "Type": "Specifies the type of action that Security Hub takes when a finding matches the defined criteria of a rule." }, "AWS::SecurityHub::AutomationRule AutomationRulesFindingFieldsUpdate": { "Confidence": "The rule action updates the `Confidence` field of a finding.", @@ -42819,7 +44345,7 @@ }, "AWS::SecurityHub::AutomationRule SeverityUpdate": { "Label": "The severity value of the finding. The allowed values are the following.\n\n- `INFORMATIONAL` - No issue was found.\n- `LOW` - The issue does not require action on its own.\n- `MEDIUM` - The issue must be addressed but not urgently.\n- `HIGH` - The issue must be addressed as a priority.\n- `CRITICAL` - The issue must be remediated immediately to avoid it escalating.", - "Normalized": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and do not provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", + "Normalized": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and don't provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", "Product": "The native severity as defined by the AWS service or integrated partner product that generated the finding." }, "AWS::SecurityHub::AutomationRule StringFilter": { @@ -42827,7 +44353,7 @@ "Value": "The string filter value. Filter values are case sensitive. For example, the product name for control-based findings is `Security Hub` . If you provide `security hub` as the filter value, there's no match." }, "AWS::SecurityHub::AutomationRule WorkflowUpdate": { - "Status": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. 
For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated." + "Status": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated." }, "AWS::SecurityHub::ConfigurationPolicy": { "ConfigurationPolicy": "An object that defines how AWS Security Hub is configured. It includes whether Security Hub is enabled or disabled, a list of enabled security standards, a list of enabled or disabled security controls, and a list of custom parameter values for specified controls. If you provide a list of security controls that are enabled in the configuration policy, Security Hub disables all other controls (including newly released controls). If you provide a list of security controls that are disabled in the configuration policy, Security Hub enables all other controls (including newly released controls).", @@ -42857,21 +44383,21 @@ "SecurityControlId": "The ID of the security control." }, "AWS::SecurityHub::ConfigurationPolicy SecurityControlsConfiguration": { - "DisabledSecurityControlIdentifiers": "A list of security controls that are disabled in the configuration policy. Security Hub enables all other controls (including newly released controls) other than the listed controls.", - "EnabledSecurityControlIdentifiers": "A list of security controls that are enabled in the configuration policy. 
Security Hub disables all other controls (including newly released controls) other than the listed controls.", + "DisabledSecurityControlIdentifiers": "A list of security controls that are disabled in the configuration policy.\n\nProvide only one of `EnabledSecurityControlIdentifiers` or `DisabledSecurityControlIdentifiers` .\n\nIf you provide `DisabledSecurityControlIdentifiers` , Security Hub enables all other controls not in the list, and enables [AutoEnableControls](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_UpdateSecurityHubConfiguration.html#securityhub-UpdateSecurityHubConfiguration-request-AutoEnableControls) .", + "EnabledSecurityControlIdentifiers": "A list of security controls that are enabled in the configuration policy.\n\nProvide only one of `EnabledSecurityControlIdentifiers` or `DisabledSecurityControlIdentifiers` .\n\nIf you provide `EnabledSecurityControlIdentifiers` , Security Hub disables all other controls not in the list, and disables [AutoEnableControls](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_UpdateSecurityHubConfiguration.html#securityhub-UpdateSecurityHubConfiguration-request-AutoEnableControls) .", "SecurityControlCustomParameters": "A list of security controls and control parameter values that are included in a configuration policy." }, "AWS::SecurityHub::ConfigurationPolicy SecurityHubPolicy": { - "EnabledStandardIdentifiers": "A list that defines which security standards are enabled in the configuration policy.", - "SecurityControlsConfiguration": "An object that defines which security controls are enabled in the configuration policy. The enablement status of a control is aligned across all of the enabled standards in an account.", + "EnabledStandardIdentifiers": "A list that defines which security standards are enabled in the configuration policy.\n\nThis property is required only if `ServiceEnabled` is set to `true` in your configuration policy.", + "SecurityControlsConfiguration": "An object that defines which security controls are enabled in the configuration policy. The enablement status of a control is aligned across all of the enabled standards in an account.\n\nThis property is required only if `ServiceEnabled` is set to true in your configuration policy.", "ServiceEnabled": "Indicates whether Security Hub is enabled in the policy." }, "AWS::SecurityHub::DelegatedAdmin": { "AdminAccountId": "The AWS account identifier of the account to designate as the Security Hub administrator account." }, "AWS::SecurityHub::FindingAggregator": { - "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Indicates to aggregate findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. 
When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Indicates to aggregate findings only from the Regions listed in the `Regions` parameter. Security Hub does not automatically aggregate findings from new Regions.", - "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do aggregate findings to the aggregation Region." + "RegionLinkingMode": "Indicates whether to aggregate findings from all of the available Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them.\n\nThe selected option also determines how to use the Regions provided in the Regions list.\n\nThe options are as follows:\n\n- `ALL_REGIONS` - Aggregates findings from all of the Regions where Security Hub is enabled. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `ALL_REGIONS_EXCEPT_SPECIFIED` - Aggregates findings from all of the Regions where Security Hub is enabled, except for the Regions listed in the `Regions` parameter. When you choose this option, Security Hub also automatically aggregates findings from new Regions as Security Hub supports them and you opt into them.\n- `SPECIFIED_REGIONS` - Aggregates findings only from the Regions listed in the `Regions` parameter. Security Hub does not automatically aggregate findings from new Regions.\n- `NO_REGIONS` - Aggregates no data because no Regions are selected as linked Regions.", + "Regions": "If `RegionLinkingMode` is `ALL_REGIONS_EXCEPT_SPECIFIED` , then this is a space-separated list of Regions that don't replicate and send findings to the home Region.\n\nIf `RegionLinkingMode` is `SPECIFIED_REGIONS` , then this is a space-separated list of Regions that do replicate and send findings to the home Region.\n\nAn `InvalidInputException` error results if you populate this field while `RegionLinkingMode` is `NO_REGIONS` ." }, "AWS::SecurityHub::Hub": { "AutoEnableControls": "Whether to automatically enable new controls when they are added to standards that are enabled.\n\nBy default, this is set to `true` , and new controls are enabled automatically. To not automatically enable new controls, set this to `false` .", @@ -42971,7 +44497,7 @@ "ResourceType": "Specifies the type of the resource that details are provided for.", "Sample": "Indicates whether or not sample findings are included in the filter results.", "SeverityLabel": "The label of a finding's severity.", - "SeverityNormalized": "Deprecated. The normalized severity of a finding. Instead of providing `Normalized` , provide `Label` .\n\nThe value of `Normalized` can be an integer between `0` and `100` .\n\nIf you provide `Label` and do not provide `Normalized` , then `Normalized` is set automatically as follows.\n\n- `INFORMATIONAL` - 0\n- `LOW` - 1\n- `MEDIUM` - 40\n- `HIGH` - 70\n- `CRITICAL` - 90", + "SeverityNormalized": "Deprecated. The normalized severity of a finding. 
Instead of providing `Normalized` , provide `Label` .\n\nThe value of `Normalized` can be an integer between `0` and `100` .\n\nIf you provide `Label` and don't provide `Normalized` , then `Normalized` is set automatically as follows.\n\n- `INFORMATIONAL` - 0\n- `LOW` - 1\n- `MEDIUM` - 40\n- `HIGH` - 70\n- `CRITICAL` - 90", "SeverityProduct": "Deprecated. This attribute isn't included in findings. Instead of providing `Product` , provide `Original` .\n\nThe native severity as defined by the AWS service or integrated partner product that generated the finding.", "SourceUrl": "A URL that links to a page about the current finding in the security findings provider's solution.", "ThreatIntelIndicatorCategory": "The category of a threat intelligence indicator.", @@ -42988,7 +44514,7 @@ "VulnerabilitiesExploitAvailable": "Indicates whether a software vulnerability in your environment has a known exploit. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", "VulnerabilitiesFixAvailable": "Indicates whether a vulnerability is fixed in a newer version of the affected software packages. You can filter findings by this field only if you use Security Hub and Amazon Inspector.", "WorkflowState": "The workflow state of a finding.\n\nNote that this field is deprecated. To search for a finding based on its workflow status, use `WorkflowStatus` .", - "WorkflowStatus": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` ." + "WorkflowStatus": "The status of the investigation into a finding. 
Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` ." }, "AWS::SecurityHub::Insight BooleanFilter": { "Value": "The value of the boolean." @@ -43394,7 +44920,7 @@ "Type": "If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records are registered for the service instances. The only valid value is `HTTP` ." }, "AWS::ServiceDiscovery::Service DnsConfig": { - "DnsRecords": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.", + "DnsRecords": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.\n\n> The record type of a service can't be updated directly and can only be changed by deleting the service and recreating it with a new `DnsConfig` .", "NamespaceId": "The ID of the namespace to use for DNS configuration.\n\n> You must specify a value for `NamespaceId` either for `DnsConfig` or for the [service properties](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-servicediscovery-service.html) . Don't specify a value in both places.", "RoutingPolicy": "The routing policy that you want to apply to all Route\u00a053 DNS records that AWS Cloud Map creates when you register an instance and specify this service.\n\n> If you want to use this service to register instances that create alias records, specify `WEIGHTED` for the routing policy. \n\nYou can specify the following values:\n\n- **MULTIVALUE** - If you define a health check for the service and the health check is healthy, Route\u00a053 returns the applicable value for up to eight instances.\n\nFor example, suppose that the service includes configurations for one `A` record and a health check. You use the service to register 10 instances. Route\u00a053 responds to DNS queries with IP addresses for up to eight healthy instances. 
If fewer than eight instances are healthy, Route\u00a053 responds to every DNS query with the IP addresses for all of the healthy instances.\n\nIf you don't define a health check for the service, Route\u00a053 assumes that all instances are healthy and returns the values for up to eight instances.\n\nFor more information about the multivalue routing policy, see [Multivalue Answer Routing](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-multivalue) in the *Route\u00a053 Developer Guide* .\n- **WEIGHTED** - Route\u00a053 returns the applicable value from one randomly selected instance from among the instances that you registered using the same service. Currently, all records have the same weight, so you can't route more or less traffic to any instances.\n\nFor example, suppose that the service includes configurations for one `A` record and a health check. You use the service to register 10 instances. Route\u00a053 responds to DNS queries with the IP address for one randomly selected instance from among the healthy instances. If no instances are healthy, Route\u00a053 responds to DNS queries as if all of the instances were healthy.\n\nIf you don't define a health check for the service, Route\u00a053 assumes that all instances are healthy and returns the applicable value for one randomly selected instance.\n\nFor more information about the weighted routing policy, see [Weighted Routing](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted) in the *Route\u00a053 Developer Guide* ." }, @@ -43491,14 +45017,14 @@ "ObjectKey": "The key name of an object in Amazon S3. For more information about Amazon S3 objects and object keys, see [Uploading, downloading, and working with objects in Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/uploading-downloading-objects.html) in the *Amazon Simple Storage Service User Guide* ." }, "AWS::StepFunctions::Activity": { - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the activity.\n\nActivity configuration is immutable, and resource names must be unique. To set customer managed keys for encryption, you must create a *new Activity* . If you attempt to change the configuration in your CFN template for an existing activity, you will receive an `ActivityAlreadyExists` exception.\n\nTo update your activity to include customer managed keys, set a new activity name within your AWS CloudFormation template.", "Name": "The name of the activity.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\nTo enable logging with CloudWatch Logs, the name should only contain 0-9, A-Z, a-z, - and _.", "Tags": "The list of tags to add to a resource.\n\nTags may only contain Unicode letters, digits, white space, or these symbols: `_ . : / = + - @` ." }, "AWS::StepFunctions::Activity EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. 
To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for an activity." }, "AWS::StepFunctions::Activity TagsEntry": { "Key": "The `key` for a key-value pair in a tag entry.", @@ -43509,7 +45035,7 @@ "DefinitionS3Location": "The name of the S3 bucket where the state machine definition is stored. The state machine definition must be a JSON or YAML file.", "DefinitionString": "The Amazon States Language definition of the state machine. The state machine definition must be in JSON. See [Amazon States Language](https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) .", "DefinitionSubstitutions": "A map (string to string) that specifies the mappings for placeholder variables in the state machine definition. This enables the customer to inject values obtained at runtime, for example from intrinsic functions, in the state machine definition. Variables can be template parameter names, resource logical IDs, resource attributes, or a variable in a key-value map.\n\nSubstitutions must follow the syntax: `${key_name}` or `${variable_1,variable_2,...}` .", - "EncryptionConfiguration": "", + "EncryptionConfiguration": "Encryption configuration for the state machine.", "LoggingConfiguration": "Defines what execution history events are logged and where they are logged.\n\n> By default, the `level` is set to `OFF` . For more information see [Log Levels](https://docs.aws.amazon.com/step-functions/latest/dg/cloudwatch-log-level.html) in the AWS Step Functions User Guide.", "RoleArn": "The Amazon Resource Name (ARN) of the IAM role to use for this state machine.", "StateMachineName": "The name of the state machine.\n\nA name must *not* contain:\n\n- white space\n- brackets `< > { } [ ]`\n- wildcard characters `? *`\n- special characters `\" # % \\ ^ | ~ ` $ & , ; : /`\n- control characters ( `U+0000-001F` , `U+007F-009F` )\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", @@ -43521,9 +45047,9 @@ "LogGroupArn": "The ARN of the CloudWatch log group to which you want your logs emitted. The ARN must end with `:*`" }, "AWS::StepFunctions::StateMachine EncryptionConfiguration": { - "KmsDataKeyReusePeriodSeconds": "", - "KmsKeyId": "", - "Type": "" + "KmsDataKeyReusePeriodSeconds": "Maximum duration that Step Functions will reuse data keys. When the period expires, Step Functions will call `GenerateDataKey` . Only applies to customer managed keys.", + "KmsKeyId": "An alias, alias ARN, key ID, or key ARN of a symmetric encryption AWS KMS key to encrypt data. To specify an AWS KMS key in a different AWS account, you must use the key ARN or alias ARN.", + "Type": "Encryption option for a state machine." }, "AWS::StepFunctions::StateMachine LogDestination": { "CloudWatchLogsLogGroup": "An object describing a CloudWatch log group. For more information, see [AWS::Logs::LogGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html) in the AWS CloudFormation User Guide." @@ -43552,7 +45078,7 @@ "RoutingConfiguration": "The routing configuration of an alias. 
Routing configuration splits [StartExecution](https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html) requests between one or two versions of the same state machine.\n\nUse `RoutingConfiguration` if you want to explicitly set the alias [weights](https://docs.aws.amazon.com/step-functions/latest/apireference/API_RoutingConfigurationListItem.html#StepFunctions-Type-RoutingConfigurationListItem-weight) . Weight is the percentage of traffic you want to route to a state machine version.\n\n> `RoutingConfiguration` and `DeploymentPreference` are mutually exclusive properties. You must define only one of these properties." }, "AWS::StepFunctions::StateMachineAlias DeploymentPreference": { - "Alarms": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "Alarms": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "Interval": "The time in minutes between each traffic shifting increment.", "Percentage": "The percentage of traffic to shift to the new version in each increment.", "StateMachineVersionArn": "The Amazon Resource Name (ARN) of the [`AWS::StepFunctions::StateMachineVersion`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachineversion.html) resource that will be the final version to which the alias points to when the traffic shifting is complete.\n\nWhile performing gradual deployments, you can only provide a single state machine version ARN. To explicitly set version weights in a CloudFormation template, use `RoutingConfiguration` instead.", @@ -43591,6 +45117,7 @@ "ExecutionRoleArn": "The ARN of the IAM role to be used to run the canary. This role must already exist, and must include `lambda.amazonaws.com` as a principal in the trust policy. The role must also have the following permissions:\n\n- `s3:PutObject`\n- `s3:GetBucketLocation`\n- `s3:ListAllMyBuckets`\n- `cloudwatch:PutMetricData`\n- `logs:CreateLogGroup`\n- `logs:CreateLogStream`\n- `logs:PutLogEvents`", "FailureRetentionPeriod": "The number of days to retain data about failed runs of this canary. If you omit this field, the default of 31 days is used. The valid range is 1 to 455 days.", "Name": "The name for this canary. Be sure to give it a descriptive name that distinguishes it from other canaries in your account.\n\nDo not include secrets or proprietary information in your canary names. The canary name makes up part of the canary ARN, and the ARN is included in outbound calls over the internet. For more information, see [Security Considerations for Synthetics Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html) .", + "ResourcesToReplicateTags": "To have the tags that you apply to this canary also be applied to the Lambda function that the canary uses, specify this property with the value `lambda-function` . If you do this, CloudWatch Synthetics will keep the tags of the canary and the Lambda function synchronized. 
Any future changes you make to the canary's tags will also be applied to the function.", "RunConfig": "A structure that contains input information for a canary run. If you omit this structure, the frequency of the canary is used as canary's timeout value, up to a maximum of 900 seconds.", "RuntimeVersion": "Specifies the runtime version to use for the canary. For more information about runtime versions, see [Canary Runtime Versions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CloudWatch_Synthetics_Canaries_Library.html) .", "Schedule": "A structure that contains information about how often the canary is to run, and when these runs are to stop.", @@ -43655,6 +45182,7 @@ "ApplicationId": "The ID of the application.", "ApplicationType": "The type of the application.", "Credentials": "The credentials of the SAP application.", + "DatabaseArn": "The Amazon Resource Name (ARN) of the database.", "Instances": "The Amazon EC2 instances on which your SAP application is running.", "SapInstanceNumber": "The SAP instance number of the application.", "Sid": "The System ID of the application.", @@ -45291,7 +46819,7 @@ "DesiredSoftwareSetId": "The ID of the software set to apply.", "DesktopArn": "The Amazon Resource Name (ARN) of the desktop to stream from Amazon WorkSpaces, WorkSpaces Web, or AppStream 2.0.", "DesktopEndpoint": "The URL for the identity provider login (only for environments that use AppStream 2.0).", - "DeviceCreationTags": "\"The tag keys and optional values for the newly created devices for this environment.\"", + "DeviceCreationTags": "The tag keys and optional values for the newly created devices for this environment.", "KmsKeyArn": "The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the environment.", "MaintenanceWindow": "A specification for a time window to apply software updates.", "Name": "The name of the environment.", @@ -45326,7 +46854,12 @@ "IdentityProviderDetails": "The identity provider details. The following list describes the provider detail keys for each identity provider type.\n\n- For Google and Login with Amazon:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- For Facebook:\n\n- `client_id`\n- `client_secret`\n- `authorize_scopes`\n- `api_version`\n- For Sign in with Apple:\n\n- `client_id`\n- `team_id`\n- `key_id`\n- `private_key`\n- `authorize_scopes`\n- For OIDC providers:\n\n- `client_id`\n- `client_secret`\n- `attributes_request_method`\n- `oidc_issuer`\n- `authorize_scopes`\n- `authorize_url` *if not available from discovery URL specified by oidc_issuer key*\n- `token_url` *if not available from discovery URL specified by oidc_issuer key*\n- `attributes_url` *if not available from discovery URL specified by oidc_issuer key*\n- `jwks_uri` *if not available from discovery URL specified by oidc_issuer key*\n- For SAML providers:\n\n- `MetadataFile` OR `MetadataURL`\n- `IDPSignout` (boolean) *optional*\n- `IDPInit` (boolean) *optional*\n- `RequestSigningAlgorithm` (string) *optional* - Only accepts `rsa-sha256`\n- `EncryptedResponses` (boolean) *optional*", "IdentityProviderName": "The identity provider name.", "IdentityProviderType": "The identity provider type.", - "PortalArn": "The ARN of the identity provider." 
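The hunk below adds `Tags` support to `AWS::WorkSpacesWeb::IdentityProvider`. A hypothetical fragment using it might look like the following; the portal ARN and metadata URL are placeholders, and `MetadataURL` is one of the SAML provider detail keys listed above:

```yaml
# Hypothetical fragment; the ARN and URL are placeholders.
Resources:
  WebIdentityProvider:
    Type: AWS::WorkSpacesWeb::IdentityProvider
    Properties:
      PortalArn: arn:aws:workspaces-web:us-east-1:111122223333:portal/example-portal
      IdentityProviderName: example-saml-idp
      IdentityProviderType: SAML
      IdentityProviderDetails:
        MetadataURL: https://idp.example.com/saml/metadata
      Tags:
        - Key: env
          Value: test
```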
+ "PortalArn": "The ARN of the identity provider.", + "Tags": "" + }, + "AWS::WorkSpacesWeb::IdentityProvider Tag": { + "Key": "The key of the tag.", + "Value": "The value of the tag" }, "AWS::WorkSpacesWeb::IpAccessSettings": { "AdditionalEncryptionContext": "Additional encryption context of the IP access settings.", @@ -45394,6 +46927,7 @@ "CookieSynchronizationConfiguration": "The configuration that specifies which cookies should be synchronized from the end user's local browser to the remote browser.", "CopyAllowed": "Specifies whether the user can copy text from the streaming session to the local device.", "CustomerManagedKey": "The customer managed key used to encrypt sensitive information in the user settings.", + "DeepLinkAllowed": "Specifies whether the user can use deep links that open automatically when connecting to a session.", "DisconnectTimeoutInMinutes": "The amount of time that a streaming session remains active after users disconnect.", "DownloadAllowed": "Specifies whether the user can download files from the streaming session to the local device.", "IdleDisconnectTimeoutInMinutes": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the disconnect timeout interval begins.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 2eb1dfd67..93120609a 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -571,7 +571,7 @@ }, "RevocationConfiguration": { "$ref": "#/definitions/AWS::ACMPCA::CertificateAuthority.RevocationConfiguration", - "markdownDescription": "Certificate revocation information used by the [CreateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_CreateCertificateAuthority.html) and [UpdateCertificateAuthority](https://docs.aws.amazon.com/privateca/latest/APIReference/API_UpdateCertificateAuthority.html) actions. Your private certificate authority (CA) can configure Online Certificate Status Protocol (OCSP) support and/or maintain a certificate revocation list (CRL). OCSP returns validation information about certificates as requested by clients, and a CRL contains an updated list of certificates revoked by your CA. 
For more information, see [RevokeCertificate](https://docs.aws.amazon.com/privateca/latest/APIReference/API_RevokeCertificate.html) in the *AWS Private CA API Reference* and [Setting up a certificate revocation method](https://docs.aws.amazon.com/privateca/latest/userguide/revocation-setup.html) in the *AWS Private CA User Guide* .\n\n> The following requirements apply to revocation configurations.\n> \n> - A configuration disabling CRLs or OCSP must contain only the `Enabled=False` parameter, and will fail if other parameters such as `CustomCname` or `ExpirationInDays` are included.\n> - In a CRL configuration, the `S3BucketName` parameter must conform to the [Amazon S3 bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) .\n> - A configuration containing a custom Canonical Name (CNAME) parameter for CRLs or OCSP must conform to [RFC2396](https://docs.aws.amazon.com/https://www.ietf.org/rfc/rfc2396.txt) restrictions on the use of special characters in a CNAME.\n> - In a CRL or OCSP configuration, the value of a CNAME parameter must not include a protocol prefix such as \"http://\" or \"https://\".", + "markdownDescription": "Information about the Online Certificate Status Protocol (OCSP) configuration or certificate revocation list (CRL) created and maintained by your private CA.", "title": "RevocationConfiguration" }, "SigningAlgorithm": { @@ -2573,7 +2573,7 @@ "type": "string" }, "Platform": { - "markdownDescription": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .", + "markdownDescription": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .\n\nIf you are deploying an SSG only app with Next.js version 14 or later, you must set the platform type to `WEB_COMPUTE` and set the artifacts `baseDirectory` to `.next` in the application's build settings. For an example of the build specification settings, see [Amplify build settings for a Next.js 14 SSG application](https://docs.aws.amazon.com/amplify/latest/userguide/deploy-nextjs-app.html#build-setting-detection-ssg-14) in the *Amplify Hosting User Guide* .", "title": "Platform", "type": "string" }, @@ -8135,9 +8135,13 @@ "additionalProperties": false, "properties": { "Destination": { + "markdownDescription": "Specifies the location of the response to modify, and how to modify it. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Destination", "type": "string" }, "Source": { + "markdownDescription": "Specifies the data to update the parameter with. To learn more, see [Transforming API requests and responses](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-parameter-mapping.html) .", + "title": "Source", "type": "string" } }, @@ -9014,7 +9018,7 @@ "type": "string" }, "LocationUri": { - "markdownDescription": "A URI to locate the configuration. 
You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://my-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", + "markdownDescription": "A URI to locate the configuration. You can specify the following:\n\n- For the AWS AppConfig hosted configuration store and for feature flags, specify `hosted` .\n- For an AWS Systems Manager Parameter Store parameter, specify either the parameter name in the format `ssm-parameter://` or the ARN.\n- For an AWS CodePipeline pipeline, specify the URI in the following format: `codepipeline` ://.\n- For an AWS Secrets Manager secret, specify the URI in the following format: `secretsmanager` ://.\n- For an Amazon S3 object, specify the URI in the following format: `s3:///` . Here is an example: `s3://amzn-s3-demo-bucket/my-app/us-east-1/my-config.json`\n- For an SSM document, specify either the document name in the format `ssm-document://` or the Amazon Resource Name (ARN).", "title": "LocationUri", "type": "string" }, @@ -20094,7 +20098,7 @@ "type": "boolean" }, "FieldLogLevel": { - "markdownDescription": "The field logging level. Values can be NONE, ERROR, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information only for the fields that are in error:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that got resolved for each field.", + "markdownDescription": "The field logging level. 
Values can be NONE, ERROR, INFO, DEBUG, or ALL.\n\n- *NONE* : No field-level logs are captured.\n- *ERROR* : Logs the following information *only* for the fields that are in the error category:\n\n- The error section in the server response.\n- Field-level errors.\n- The generated request/response functions that got resolved for error fields.\n- *INFO* : Logs the following information *only* for the fields that are in the info and error categories:\n\n- Info-level messages.\n- The user messages sent through `$util.log.info` and `console.log` .\n- Field-level tracing and mapping logs are not shown.\n- *DEBUG* : Logs the following information *only* for the fields that are in the debug, info, and error categories:\n\n- Debug-level messages.\n- The user messages sent through `$util.log.info` , `$util.log.debug` , `console.log` , and `console.debug` .\n- Field-level tracing and mapping logs are not shown.\n- *ALL* : The following information is logged for all fields in the query:\n\n- Field-level tracing information.\n- The generated request/response functions that were resolved for each field.", "title": "FieldLogLevel", "type": "string" } @@ -21172,7 +21176,7 @@ "items": { "$ref": "#/definitions/AWS::ApplicationInsights::Application.ComponentMonitoringSetting" }, - "markdownDescription": "The monitoring settings of the components.", + "markdownDescription": "The monitoring settings of the components. Not required to set up default monitoring for all components. To set up default monitoring for all components, set `AutoConfigurationEnabled` to `true` .", "title": "ComponentMonitoringSettings", "type": "array" }, @@ -21303,7 +21307,7 @@ "additionalProperties": false, "properties": { "ComponentARN": { - "markdownDescription": "The ARN of the component.", + "markdownDescription": "The ARN of the component. Either the component ARN or the component name is required.", "title": "ComponentARN", "type": "string" }, @@ -21313,7 +21317,7 @@ "type": "string" }, "ComponentName": { - "markdownDescription": "The name of the component.", + "markdownDescription": "The name of the component. Either the component ARN or the component name is required.", "title": "ComponentName", "type": "string" }, @@ -22236,7 +22240,7 @@ "additionalProperties": false, "properties": { "S3AclOption": { - "markdownDescription": "The Amazon S3 canned ACL that Athena should specify when storing query results. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* .", + "markdownDescription": "The Amazon S3 canned ACL that Athena should specify when storing query results, including data files inserted by Athena as the result of statements like CTAS or INSERT INTO. Currently the only supported canned ACL is `BUCKET_OWNER_FULL_CONTROL` . If a query runs in a workgroup and the workgroup overrides client-side settings, then the Amazon S3 canned ACL specified in the workgroup's settings is used for all queries that run in the workgroup. 
For more information about Amazon S3 canned ACLs, see [Canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) in the *Amazon S3 User Guide* .", "title": "S3AclOption", "type": "string" } @@ -22305,7 +22309,7 @@ }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::Athena::WorkGroup.EncryptionConfiguration", - "markdownDescription": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "markdownDescription": "If query results are encrypted in Amazon S3, indicates the encryption option used (for example, `SSE_KMS` or `CSE_KMS` ) and key information. This is a client-side setting. If workgroup settings override client-side settings, then the query uses the encryption configuration that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `EnforceWorkGroupConfiguration` and [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "title": "EncryptionConfiguration" }, "ExpectedBucketOwner": { @@ -22314,7 +22318,7 @@ "type": "string" }, "OutputLocation": { - "markdownDescription": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` .", + "markdownDescription": "The location in Amazon S3 where your query results are stored, such as `s3://path/to/query/bucket/` . To run a query, you must specify the query results location using either a client-side setting for individual queries or a location specified by the workgroup. If workgroup settings override client-side settings, then the query uses the location specified for the workgroup. If no query location is set, Athena issues an error. For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) and `EnforceWorkGroupConfiguration` .", "title": "OutputLocation", "type": "string" } @@ -22340,7 +22344,7 @@ "title": "CustomerContentEncryptionConfiguration" }, "EnforceWorkGroupConfiguration": { - "markdownDescription": "If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", + "markdownDescription": "If set to \"true\", the settings for the workgroup override client-side settings. 
If set to \"false\", client-side settings are used. For more information, see [Override client-side settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .", "title": "EnforceWorkGroupConfiguration", "type": "boolean" }, @@ -22366,7 +22370,7 @@ }, "ResultConfiguration": { "$ref": "#/definitions/AWS::Athena::WorkGroup.ResultConfiguration", - "markdownDescription": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Working with Query Results, Output Files, and Query History](https://docs.aws.amazon.com/athena/latest/ug/querying.html) .", + "markdownDescription": "Specifies the location in Amazon S3 where query results are stored and the encryption option, if any, used for query results. For more information, see [Work with query results and recent queries](https://docs.aws.amazon.com/athena/latest/ug/querying.html) .", "title": "ResultConfiguration" } }, @@ -22723,7 +22727,7 @@ "type": "number" }, "HealthCheckType": { - "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", + "markdownDescription": "A comma-separated value string of one or more health check types.\n\nThe valid values are `EC2` , `EBS` , `ELB` , and `VPC_LATTICE` . `EC2` is the default health check and cannot be disabled. For more information, see [Health checks for instances in an Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-health-checks.html) in the *Amazon EC2 Auto Scaling User Guide* .\n\nOnly specify `EC2` if you must clear a value that was previously set.", "title": "HealthCheckType", "type": "string" }, @@ -25054,7 +25058,7 @@ "properties": { "X12Details": { "$ref": "#/definitions/AWS::B2BI::Capability.X12Details", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.", + "markdownDescription": "", "title": "X12Details" } }, @@ -25329,18 +25333,12 @@ "additionalProperties": false, "properties": { "EdiType": { - "$ref": "#/definitions/AWS::B2BI::Transformer.EdiType", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. 
X12 is a set of standards and corresponding messages that define specific business documents.", - "title": "EdiType" + "$ref": "#/definitions/AWS::B2BI::Transformer.EdiType" }, "FileFormat": { - "markdownDescription": "Returns that the currently supported file formats for EDI transformations are `JSON` and `XML` .", - "title": "FileFormat", "type": "string" }, "MappingTemplate": { - "markdownDescription": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", - "title": "MappingTemplate", "type": "string" }, "Name": { @@ -25349,8 +25347,6 @@ "type": "string" }, "SampleDocument": { - "markdownDescription": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", - "title": "SampleDocument", "type": "string" }, "Status": { @@ -25401,9 +25397,7 @@ "additionalProperties": false, "properties": { "X12Details": { - "$ref": "#/definitions/AWS::B2BI::Transformer.X12Details", - "markdownDescription": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.", - "title": "X12Details" + "$ref": "#/definitions/AWS::B2BI::Transformer.X12Details" } }, "required": [ @@ -26156,7 +26150,7 @@ "type": "object" }, "BackupVaultName": { - "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of lowercase letters, numbers, and hyphens.", + "markdownDescription": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created.", "title": "BackupVaultName", "type": "string" }, @@ -27623,7 +27617,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EcsTaskProperties" }, - "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one element.", + "markdownDescription": "An object that contains the properties for the Amazon ECS task definition of a job.\n\n> This object is currently limited to one task element. However, the task element can run up to 10 containers.", "title": "TaskProperties", "type": "array" } @@ -28237,7 +28231,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.", + "markdownDescription": "The properties of the container that's used on the Amazon EKS pod.\n\n> This object is limited to 10 elements.", "title": "Containers", "type": "array" }, @@ -28263,7 +28257,7 @@ "items": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksContainer" }, - "markdownDescription": "These containers run before application containers, always runs to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store. 
For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements", + "markdownDescription": "These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see [Init Containers](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) in the *Kubernetes documentation* .\n\n> This object is limited to 10 elements.", "title": "InitContainers", "type": "array" }, @@ -29209,7 +29203,7 @@ "additionalProperties": false, "properties": { "OverrideLambda": { - "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Agents for Amazon Bedrock](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", + "markdownDescription": "The ARN of the Lambda function to use when parsing the raw foundation model output in parts of the agent sequence. If you specify this field, at least one of the `promptConfigurations` must contain a `parserMode` value that is set to `OVERRIDDEN` . For more information, see [Parser Lambda function in Amazon Bedrock Agents](https://docs.aws.amazon.com/bedrock/latest/userguide/lambda-parser.html) .", "title": "OverrideLambda", "type": "string" }, @@ -30020,7 +30014,7 @@ }, "VectorKnowledgeBaseConfiguration": { "$ref": "#/definitions/AWS::Bedrock::KnowledgeBase.VectorKnowledgeBaseConfiguration", - "markdownDescription": "Contains details about the embeddings model that'sused to convert the data source.", + "markdownDescription": "Contains details about the model that's used to convert the data source into vector embeddings.", "title": "VectorKnowledgeBaseConfiguration" } }, @@ -32636,7 +32630,7 @@ "type": "array" }, "TeamId": { - "markdownDescription": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-4 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", + "markdownDescription": "The ID of the Microsoft Team authorized with AWS Chatbot .\n\nTo get the team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot console. Then you can copy and paste the team ID from the console. For more details, see steps 1-3 in [Get started with Microsoft Teams](https://docs.aws.amazon.com/chatbot/latest/adminguide/teams-setup.html#teams-client-setup) in the *AWS Chatbot Administrator Guide* .", "title": "TeamId", "type": "string" }, @@ -32745,12 +32739,12 @@ "type": "string" }, "SlackChannelId": { - "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. 
The channel ID is the 9-character string at the end of the URL. For example, `ABCBBLZZZ` .", + "markdownDescription": "The ID of the Slack channel.\n\nTo get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the character string at the end of the URL. For example, `ABCBBLZZZ` .", "title": "SlackChannelId", "type": "string" }, "SlackWorkspaceId": { - "markdownDescription": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in [Setting Up AWS Chatbot with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro) in the *AWS Chatbot User Guide* .", + "markdownDescription": "The ID of the Slack workspace authorized with AWS Chatbot .\n\nTo get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-3 in [Tutorial: Get started with Slack](https://docs.aws.amazon.com/chatbot/latest/adminguide/slack-setup.html) in the *AWS Chatbot User Guide* .", "title": "SlackWorkspaceId", "type": "string" }, @@ -33226,7 +33220,7 @@ "items": { "$ref": "#/definitions/AWS::CleanRooms::ConfiguredTable.AnalysisRule" }, - "markdownDescription": "The entire created analysis rule.", + "markdownDescription": "The analysis rule that was created for the configured table.", "title": "AnalysisRules", "type": "array" }, @@ -33790,7 +33784,7 @@ "properties": { "S3": { "$ref": "#/definitions/AWS::CleanRooms::Membership.ProtectedQueryS3OutputConfiguration", - "markdownDescription": "Required configuration for a protected query with an `S3` output type.", + "markdownDescription": "Required configuration for a protected query with an `s3` output type.", "title": "S3" } }, @@ -33904,7 +33898,7 @@ }, "Parameters": { "$ref": "#/definitions/AWS::CleanRooms::PrivacyBudgetTemplate.Parameters", - "markdownDescription": "Specifies the epislon and noise parameters for the privacy budget template.", + "markdownDescription": "Specifies the epsilon and noise parameters for the privacy budget template.", "title": "Parameters" }, "PrivacyBudgetType": { @@ -34485,7 +34479,7 @@ "additionalProperties": false, "properties": { "Configuration": { - "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "Specifies the activated hook type configuration, in this AWS account and AWS Region .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "Configuration", "type": "string" }, @@ -34495,12 +34489,12 @@ "type": "string" }, "TypeArn": { - "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "The Amazon Resource Number (ARN) for the hook to set `Configuration` for.\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeArn", "type": "string" }, "TypeName": { - "markdownDescription": "The unique name for your hook. 
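Because `TypeName` and `Configuration` (or `TypeArn` and `Configuration` ) must be supplied together, a sketch helps; the hook namespace and configuration JSON below are illustrative only:

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleHookConfig:                   # hypothetical logical ID
    Type: AWS::CloudFormation::HookTypeConfig
    Properties:
      TypeName: Organization::Service::Hook    # the recommended three-part namespace
      Configuration: >-
        {"CloudFormationConfiguration": {"HookConfiguration":
        {"TargetStacks": "ALL", "FailureMode": "FAIL", "Properties": {}}}}
```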
Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeARN` and `Configuration` .", + "markdownDescription": "The unique name for your hook. Specifies a three-part namespace for your hook, with a recommended pattern of `Organization::Service::Hook` .\n\nYou must specify either `TypeName` and `Configuration` or `TypeArn` and `Configuration` .", "title": "TypeName", "type": "string" } @@ -34910,7 +34904,7 @@ "type": "string" }, "PublicVersionNumber": { - "markdownDescription": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. AWS CloudFormation automatically sets the first version number to be `1.0.0` .", + "markdownDescription": "The version number to assign to this version of the extension.\n\nUse the following format, and adhere to semantic versioning when assigning a version number to your extension:\n\n`MAJOR.MINOR.PATCH`\n\nFor more information, see [Semantic Versioning 2.0.0](https://docs.aws.amazon.com/https://semver.org/) .\n\nIf you don't specify a version number, CloudFormation increments the version number by one minor version release.\n\nYou cannot specify a version number the first time you publish a type. CloudFormation automatically sets the first version number to be `1.0.0` .", "title": "PublicVersionNumber", "type": "string" }, @@ -34988,7 +34982,7 @@ "type": "boolean" }, "ConnectionArn": { - "markdownDescription": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *CloudFormation CLI User Guide* .", + "markdownDescription": "If you are using a Bitbucket or GitHub account for identity verification, the Amazon Resource Name (ARN) for your connection to that account.\n\nFor more information, see [Prerequisite: Registering your account to publish CloudFormation extensions](https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/publish-extension.html#publish-extension-prereqs) in the *AWS CloudFormation Command Line Interface (CLI) User Guide* .", "title": "ConnectionArn", "type": "string" } @@ -35230,7 +35224,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Simple Notification Service (Amazon SNS) topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", + "markdownDescription": "The Amazon SNS topic ARNs to publish stack related events. You can find your Amazon SNS topic ARNs using the Amazon SNS console or your Command Line Interface (CLI).", "title": "NotificationARNs", "type": "array" }, @@ -35249,7 +35243,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Key-value pairs to associate with this stack. 
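A minimal nested-stack sketch showing where `NotificationARNs` and the 50-tag limit from these hunks land; the template URL and tag values are placeholders:

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  StackEventsTopic:
    Type: AWS::SNS::Topic
  ExampleNestedStack:                  # hypothetical logical ID
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: https://s3.amazonaws.com/example-bucket/child.yaml
      NotificationARNs:
        - !Ref StackEventsTopic        # Ref on a topic returns its ARN
      Tags:                            # up to 50; propagated to stack resources
        - Key: team
          Value: platform
```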
AWS CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", + "markdownDescription": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to the resources created in the stack. A maximum number of 50 tags can be specified.", "title": "Tags", "type": "array" }, @@ -35398,7 +35392,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "Key-value pairs to associate with this stack. AWS CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, AWS CloudFormation doesn't modify the stack's tags. If you specify an empty value, AWS CloudFormation removes all associated tags.", + "markdownDescription": "Key-value pairs to associate with this stack. CloudFormation also propagates these tags to supported resources in the stack. You can specify a maximum number of 50 tags.\n\nIf you don't specify this parameter, CloudFormation doesn't modify the stack's tags. If you specify an empty value, CloudFormation removes all associated tags.", "title": "Tags", "type": "array" }, @@ -35460,7 +35454,7 @@ "additionalProperties": false, "properties": { "AccountFilterType": { - "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSets deploys to the accounts specified in `Accounts` parameter.\n- `DIFFERENCE` : StackSets excludes the accounts specified in `Accounts` parameter. This enables user to avoid certain accounts within an OU such as suspended accounts.\n- `UNION` : StackSets includes additional accounts deployment targets.\n\nThis is the default value if `AccountFilterType` is not provided. This enables user to update an entire OU and individual accounts from a different OU in one request, which used to be two separate requests.\n- `NONE` : Deploys to all the accounts in specified organizational units (OU).", + "markdownDescription": "Limit deployment targets to individual accounts or include additional accounts with provided OUs.\n\nThe following is a list of possible values for the `AccountFilterType` operation.\n\n- `INTERSECTION` : StackSet deploys to the accounts specified in the `Accounts` parameter.\n- `DIFFERENCE` : StackSet deploys to the OU, excluding the accounts specified in the `Accounts` parameter.\n- `UNION` : StackSet deploys to the OU, and the accounts specified in the `Accounts` parameter. 
`UNION` is not supported for create operations when using StackSet as a resource.", "title": "AccountFilterType", "type": "string" }, @@ -36294,12 +36288,12 @@ "additionalProperties": false, "properties": { "Header": { - "markdownDescription": "", + "markdownDescription": "The name of the HTTP header to configure for the single header policy.", "title": "Header", "type": "string" }, "Value": { - "markdownDescription": "", + "markdownDescription": "Specifies the value to assign to the header for a single header policy.", "title": "Value", "type": "string" } }, @@ -36334,11 +36328,11 @@ "properties": { "SessionStickinessConfig": { "$ref": "#/definitions/AWS::CloudFront::ContinuousDeploymentPolicy.SessionStickinessConfig", - "markdownDescription": "", + "markdownDescription": "Enables session stickiness for the associated origin or cache settings.", "title": "SessionStickinessConfig" }, "Weight": { - "markdownDescription": "", + "markdownDescription": "The percentage of requests that CloudFront sends to the associated origin or cache settings.", "title": "Weight", "type": "number" } }, @@ -36807,7 +36801,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "An alias for the CloudFront distribution's domain name.\n\n> This property is legacy. We recommend that you use [Aliases](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-aliases) instead.", "title": "CNAMEs", "type": "array" }, @@ -36839,7 +36833,7 @@ }, "CustomOrigin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyCustomOrigin", - "markdownDescription": "", + "markdownDescription": "The user-defined HTTP server that serves as the origin for content that CloudFront distributes.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "CustomOrigin" }, "DefaultCacheBehavior": { @@ -36897,7 +36891,7 @@ }, "S3Origin": { "$ref": "#/definitions/AWS::CloudFront::Distribution.LegacyS3Origin", - "markdownDescription": "", + "markdownDescription": "The origin as an Amazon S3 bucket.\n\n> This property is legacy. We recommend that you use [Origin](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-origin.html) instead.", "title": "S3Origin" }, "Staging": { @@ -37020,22 +37014,22 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "HTTPPort": { - "markdownDescription": "", + "markdownDescription": "The HTTP port that CloudFront uses to connect to the origin. Specify the HTTP port that the origin listens on.", "title": "HTTPPort", "type": "number" }, "HTTPSPort": { - "markdownDescription": "", + "markdownDescription": "The HTTPS port that CloudFront uses to connect to the origin. 
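The `Header` , `Value` , `Weight` , and `SessionStickinessConfig` properties filled in above belong to `AWS::CloudFront::ContinuousDeploymentPolicy` . A hedged sketch that shifts a small share of traffic to a staging distribution (the DNS name and TTLs are placeholders):

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleCdPolicy:                     # hypothetical logical ID
    Type: AWS::CloudFront::ContinuousDeploymentPolicy
    Properties:
      ContinuousDeploymentPolicyConfig:
        Enabled: true
        StagingDistributionDnsNames:
          - d111111abcdef8.cloudfront.net    # placeholder staging distribution
        TrafficConfig:
          Type: SingleWeight
          SingleWeightConfig:
            Weight: 0.05               # route 5% of requests to staging
            SessionStickinessConfig:
              IdleTTL: 300
              MaximumTTL: 600
```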
Specify the HTTPS port that the origin listens on.", "title": "HTTPSPort", "type": "number" }, "OriginProtocolPolicy": { - "markdownDescription": "", + "markdownDescription": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin.", "title": "OriginProtocolPolicy", "type": "string" }, @@ -37043,7 +37037,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The minimum SSL/TLS protocol version that CloudFront uses when communicating with your origin server over HTTPS.\n\nFor more information, see [Minimum Origin SSL Protocol](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) in the *Amazon CloudFront Developer Guide* .", "title": "OriginSSLProtocols", "type": "array" } @@ -37059,12 +37053,12 @@ "additionalProperties": false, "properties": { "DNSName": { - "markdownDescription": "", + "markdownDescription": "The domain name assigned to your CloudFront distribution.", "title": "DNSName", "type": "string" }, "OriginAccessIdentity": { - "markdownDescription": "", + "markdownDescription": "The CloudFront origin access identity to associate with the distribution. Use an origin access identity to configure the distribution so that end users can only access objects in an Amazon S3 bucket through CloudFront .\n\n> This property is legacy. We recommend that you use [OriginAccessControl](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudfront-originaccesscontrol.html) instead.", "title": "OriginAccessIdentity", "type": "string" } }, @@ -39277,7 +39271,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . 
You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. 
For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. 
Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. `resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39442,7 +39436,7 @@ "items": { "$ref": "#/definitions/AWS::CloudTrail::Trail.AdvancedEventSelector" }, - "markdownDescription": "Specifies the settings for advanced event selectors. You can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", + "markdownDescription": "Specifies the settings for advanced event selectors. You can use advanced event selectors to log management events, data events for all resource types, and network activity events.\n\nYou can add advanced event selectors, and conditions for your advanced event selectors, up to a maximum of 500 values for all conditions and selectors on a trail. You can use either `AdvancedEventSelectors` or `EventSelectors` , but not both. If you apply `AdvancedEventSelectors` to a trail, any existing `EventSelectors` are overwritten. 
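The field rules spelled out above map directly onto `AdvancedEventSelectors` . A sketch of an event data store pairing a data-event selector with a network-activity selector (selector names are illustrative):

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleEventDataStore:               # hypothetical logical ID
    Type: AWS::CloudTrail::EventDataStore
    Properties:
      AdvancedEventSelectors:
        - Name: S3 object data events
          FieldSelectors:
            - Field: eventCategory     # required; Equals only
              Equals: [Data]
            - Field: resources.type    # required for data events
              Equals: ["AWS::S3::Object"]
        - Name: Denied VPC endpoint activity
          FieldSelectors:
            - Field: eventCategory
              Equals: [NetworkActivity]
            - Field: eventSource       # required for network activity events
              Equals: [ec2.amazonaws.com]
            - Field: errorCode
              Equals: [VpceAccessDenied]
```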
For more information about advanced event selectors, see [Logging data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html) and [Logging network activity events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-network-events-with-cloudtrail.html) in the *AWS CloudTrail User Guide* .", "title": "AdvancedEventSelectors", "type": "array" }, @@ -39600,7 +39594,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::FlowAlias`\n- `AWS::Bedrock::Guardrail`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CloudWatch::Metric`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::DynamoDB::Table`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::Kinesis::Stream`\n- `AWS::Kinesis::StreamConsumer`\n- `AWS::KinesisVideo::Stream`\n- `AWS::Lambda::Function`\n- `AWS::MachineLearning::MlModel`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PaymentCryptography::Alias`\n- `AWS::PaymentCryptography::Key`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::PCAConnectorSCEP::Connector`\n- `AWS::QApps:QApp`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3::Object`\n- `AWS::S3Express::Object`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSM::ManagedNode`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::StepFunctions::StateMachine`\n- `AWS::SWF::Domain`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n- `AWS::XRay::Trace`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value. For information about filtering on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. 
For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `eventCategory` (required), `eventSource` , and `readOnly` .\n\nFor CloudTrail data events, supported fields include `eventCategory` (required), `resources.type` (required), `eventName` , `readOnly` , and `resources.ARN` .\n\nFor CloudTrail network activity events, supported fields include `eventCategory` (required), `eventSource` (required), `eventName` , `errorCode` , and `vpcEndpointId` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - This is an optional field that is only used for management events and data events. This field can be set to `Equals` with a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - This field is only used for management events and network activity events.\n\nFor management events, this is an optional field that can be set to `NotEquals` `kms.amazonaws.com` to exclude KMS management events, or `NotEquals` `rdsdata.amazonaws.com` to exclude RDS management events.\n\nFor network activity events, this is a required field that only uses the `Equals` operator. Set this field to the event source for which you want to log network activity events. If you want to log network activity events for multiple event sources, you must create a separate field selector for each event source.\n\nThe following are valid values for network activity events:\n\n- `cloudtrail.amazonaws.com`\n- `ec2.amazonaws.com`\n- `kms.amazonaws.com`\n- `secretsmanager.amazonaws.com`\n- *`eventName`* - This is an optional field that is only used for data events and network activity events. You can use any operator with `eventName` . You can use it to \ufb01lter in or \ufb01lter out specific events. You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This field is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n- For CloudTrail network activity events, the value must be `NetworkActivity` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`errorCode`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This is the error code to filter on. Currently, the only valid `errorCode` is `VpceAccessDenied` . `errorCode` can only use the `Equals` operator.\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator.\n\nFor a list of available resource types for data events, see [Data events](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#logging-data-events) in the *AWS CloudTrail User Guide* .\n\nYou can have only one `resources.type` \ufb01eld per selector. To log events on more than one resource type, add another selector.\n- *`resources.ARN`* - The `resources.ARN` is an optional field for data events. You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nFor information about filtering data events on the `resources.ARN` field, see [Filtering data events by resources.ARN](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/filtering-data-events.html#filtering-data-events-resourcearn) in the *AWS CloudTrail User Guide* .\n\n> You can't use the `resources.ARN` field to filter resource types that do not have ARNs.\n- *`vpcEndpointId`* - This \ufb01eld is only used to filter CloudTrail network activity events and is optional. This field identifies the VPC endpoint that the request passed through. You can use any operator with `vpcEndpointId` .", "title": "Field", "type": "string" }, @@ -39646,7 +39640,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information about these additional resource types, see [AdvancedFieldSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedFieldSelector.html) .", + "markdownDescription": "The resource type in which you want to log data events. You can specify the following *basic* event selector resource types:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n\nAdditional resource types are available through *advanced* event selectors. For more information, see [AdvancedEventSelector](https://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_AdvancedEventSelector.html) .", "title": "Type", "type": "string" }, @@ -39654,7 +39648,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::DOC-EXAMPLE-BUCKET1/example-images` . 
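For the basic (non-advanced) selectors that the surrounding `Type` and `Values` hunks describe, a minimal trail sketch; the bucket names follow the `amzn-s3-demo` placeholder style used in these strings:

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleTrail:                        # hypothetical logical ID
    Type: AWS::CloudTrail::Trail
    Properties:
      IsLogging: true
      S3BucketName: amzn-s3-demo-logging-bucket
      EventSelectors:
        - ReadWriteType: All
          IncludeManagementEvents: true
          DataResources:
            - Type: AWS::S3::Object
              Values:
                - arn:aws:s3:::amzn-s3-demo-bucket1/   # all objects in one bucket
```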
The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", + "markdownDescription": "An array of Amazon Resource Name (ARN) strings or partial ARN strings for the specified resource type.\n\n- To log data events for all objects in all S3 buckets in your AWS account , specify the prefix as `arn:aws:s3` .\n\n> This also enables logging of data event activity performed by any user or role in your AWS account , even if that activity is performed on a bucket that belongs to another AWS account .\n- To log data events for all objects in an S3 bucket, specify the bucket and an empty object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/` . The trail logs data events for all objects in this S3 bucket.\n- To log data events for specific objects, specify the S3 bucket and object prefix such as `arn:aws:s3:::amzn-s3-demo-bucket1/example-images` . The trail logs data events for objects in this S3 bucket that match the prefix.\n- To log data events for all Lambda functions in your AWS account , specify the prefix as `arn:aws:lambda` .\n\n> This also enables logging of `Invoke` activity performed by any user or role in your AWS account , even if that activity is performed on a function that belongs to another AWS account .\n- To log data events for a specific Lambda function, specify the function ARN.\n\n> Lambda function ARNs are exact. For example, if you specify a function ARN *arn:aws:lambda:us-west-2:111111111111:function:helloworld* , data events will only be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld* . They will not be logged for *arn:aws:lambda:us-west-2:111111111111:function:helloworld2* .\n- To log data events for all DynamoDB tables in your AWS account , specify the prefix as `arn:aws:dynamodb` .", "title": "Values", "type": "array" } @@ -41219,12 +41213,12 @@ "type": "number" }, "ComputeType": { - "markdownDescription": "Information about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. 
This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", + "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nInformation about the compute resources the compute fleet uses. Available values include:\n\n- `BUILD_GENERAL1_SMALL` : Use up to 3 GB memory and 2 vCPUs for builds.\n- `BUILD_GENERAL1_MEDIUM` : Use up to 7 GB memory and 4 vCPUs for builds.\n- `BUILD_GENERAL1_LARGE` : Use up to 16 GB memory and 8 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_XLARGE` : Use up to 70 GB memory and 36 vCPUs for builds, depending on your environment type.\n- `BUILD_GENERAL1_2XLARGE` : Use up to 145 GB memory, 72 vCPUs, and 824 GB of SSD storage for builds. This compute type supports Docker images up to 100 GB uncompressed.\n\nIf you use `BUILD_GENERAL1_SMALL` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 3 GB memory and 2 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 16 GB memory, 4 vCPUs, and 1 NVIDIA A10G Tensor Core GPU for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 4 GB memory and 2 vCPUs on ARM-based processors for builds.\n\nIf you use `BUILD_GENERAL1_LARGE` :\n\n- For environment type `LINUX_CONTAINER` , you can use up to 15 GB memory and 8 vCPUs for builds.\n- For environment type `LINUX_GPU_CONTAINER` , you can use up to 255 GB memory, 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds.\n- For environment type `ARM_CONTAINER` , you can use up to 16 GB memory and 8 vCPUs on ARM-based processors for builds.\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild User Guide.*", "title": "ComputeType", "type": "string" }, "EnvironmentType": { - "markdownDescription": "The environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", + "markdownDescription": "> Updating this field is not allowed for `MAC_ARM` . \n\nThe environment type of the compute fleet.\n\n- The environment type `ARM_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), EU (Frankfurt), and South America (S\u00e3o Paulo).\n- The environment type `LINUX_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Singapore), Asia Pacific (Sydney), South America (S\u00e3o Paulo), and Asia Pacific (Mumbai).\n- The environment type `LINUX_GPU_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Tokyo), and Asia Pacific (Sydney).\n- The environment type `WINDOWS_SERVER_2019_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland).\n- The environment type `WINDOWS_SERVER_2022_CONTAINER` is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (S\u00e3o Paulo) and Asia Pacific (Mumbai).\n\nFor more information, see [Build environment compute types](https://docs.aws.amazon.com//codebuild/latest/userguide/build-env-ref-compute-types.html) in the *AWS CodeBuild user guide* .", "title": "EnvironmentType", "type": "string" }, @@ -41892,7 +41886,7 @@ "properties": { "Auth": { "$ref": "#/definitions/AWS::CodeBuild::Project.SourceAuth", - "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.\n\nThis information is for the AWS CodeBuild console's use only. Your code should not get or set `Auth` directly.", + "markdownDescription": "Information about the authorization settings for AWS CodeBuild to access the source code to be built.", "title": "Auth" }, "BuildSpec": { @@ -41950,12 +41944,12 @@ "additionalProperties": false, "properties": { "Resource": { - "markdownDescription": "The resource value that applies to the specified authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The resource value that applies to the specified authorization type.", "title": "Resource", "type": "string" }, "Type": { - "markdownDescription": "The authorization type to use. 
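The fleet hunks above pair `ComputeType` with a region-supported `EnvironmentType` . A minimal `AWS::CodeBuild::Fleet` sketch under those constraints (the name and capacity are placeholders):

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleFleet:                        # hypothetical logical ID
    Type: AWS::CodeBuild::Fleet
    Properties:
      Name: example-linux-fleet
      BaseCapacity: 2
      ComputeType: BUILD_GENERAL1_SMALL    # 3 GB memory / 2 vCPUs on Linux
      EnvironmentType: LINUX_CONTAINER
```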
The only valid value is `OAUTH` , which represents the OAuth authorization type.\n\n> This data type is used by the AWS CodeBuild console only.", + "markdownDescription": "The authorization type to use. Valid options are OAUTH, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "Type", "type": "string" } @@ -42202,7 +42196,7 @@ "additionalProperties": false, "properties": { "AuthType": { - "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, or CODECONNECTIONS.", + "markdownDescription": "The type of authentication used by the credentials. Valid options are OAUTH, BASIC_AUTH, PERSONAL_ACCESS_TOKEN, CODECONNECTIONS, or SECRETS_MANAGER.", "title": "AuthType", "type": "string" }, @@ -42212,7 +42206,7 @@ "type": "string" }, "Token": { - "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` .", + "markdownDescription": "For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, this is either the access token or the app password. For the `authType` CODECONNECTIONS, this is the `connectionArn` . For the `authType` SECRETS_MANAGER, this is the `secretArn` .", "title": "Token", "type": "string" }, @@ -44503,7 +44497,7 @@ "additionalProperties": false, "properties": { "Authentication": { - "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. \n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhooks trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", + "markdownDescription": "Supported options are GITHUB_HMAC, IP, and UNAUTHENTICATED.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response. 
\n\n- For information about the authentication scheme implemented by GITHUB_HMAC, see [Securing your webhooks](https://docs.aws.amazon.com/https://developer.github.com/webhooks/securing/) on the GitHub Developer website.\n- IP rejects webhook trigger requests unless they originate from an IP address in the IP range whitelisted in the authentication configuration.\n- UNAUTHENTICATED accepts all webhook trigger requests regardless of origin.", "title": "Authentication", "type": "string" }, @@ -44586,7 +44580,7 @@ "type": "string" }, "SecretToken": { - "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities.", + "markdownDescription": "The property used to configure GitHub authentication. For GITHUB_HMAC, only the `SecretToken` property must be set.\n\n> When creating CodePipeline webhooks, do not use your own credentials or reuse the same secret token across multiple webhooks. For optimal security, generate a unique secret token for each webhook you create. The secret token is an arbitrary string that you provide, which GitHub uses to compute and sign the webhook payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook payloads. Using your own credentials or reusing the same token across multiple webhooks can lead to security vulnerabilities. > If a secret token was provided, it will be redacted in the response.", "title": "SecretToken", "type": "string" } @@ -45666,12 +45660,12 @@ "items": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.LogConfiguration" }, - "markdownDescription": "The detailed activity logging destination of a user pool.", + "markdownDescription": "A logging destination of a user pool. 
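Tying the webhook hunks just above together: a hedged `AWS::CodePipeline::Webhook` sketch that keeps the unique secret token out of the template via a dynamic reference (pipeline names and the secret path are placeholders):

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleWebhook:                      # hypothetical logical ID
    Type: AWS::CodePipeline::Webhook
    Properties:
      Authentication: GITHUB_HMAC
      AuthenticationConfiguration:
        SecretToken: '{{resolve:secretsmanager:example/webhook-token}}'   # unique per webhook
      Filters:
        - JsonPath: '$.ref'
          MatchEquals: 'refs/heads/{Branch}'
      TargetPipeline: example-pipeline
      TargetAction: Source
      TargetPipelineVersion: 1
      RegisterWithThirdParty: true
```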
User pools can have multiple logging destinations for message-delivery and user-activity logs.", "title": "LogConfigurations", "type": "array" }, "UserPoolId": { - "markdownDescription": "The ID of the user pool where you configured detailed activity logging.", + "markdownDescription": "The ID of the user pool where you configured logging.", "title": "UserPoolId", "type": "string" } @@ -45718,16 +45712,16 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::Cognito::LogDeliveryConfiguration.CloudWatchLogsConfiguration", - "markdownDescription": "The CloudWatch logging destination of a user pool detailed activity logging configuration.", + "markdownDescription": "Configuration for the CloudWatch log group destination of user pool detailed activity logging, or of user activity log export with advanced security features.\n\nThis data type is a request parameter of [SetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetLogDeliveryConfiguration.html) and a response parameter of [GetLogDeliveryConfiguration](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetLogDeliveryConfiguration.html) .", "title": "CloudWatchLogsConfiguration" }, "EventSource": { - "markdownDescription": "The source of events that your user pool sends for detailed activity logging.", + "markdownDescription": "The source of events that your user pool sends for logging. To send error-level logs about user notification activity, set to `userNotification` . To send info-level logs about advanced security features user activity, set to `userAuthEvents` .", "title": "EventSource", "type": "string" }, "LogLevel": { - "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging.", + "markdownDescription": "The `errorlevel` selection of logs that a user pool sends for detailed activity logging. To send `userNotification` activity with [information about message delivery](https://docs.aws.amazon.com/cognito/latest/developerguide/tracking-quotas-and-usage-in-cloud-watch-logs.html) , choose `ERROR` with `CloudWatchLogsConfiguration` . To send `userAuthEvents` activity with user logs from advanced security features, choose `INFO` with one of `CloudWatchLogsConfiguration` , `FirehoseConfiguration` , or `S3Configuration` .", "title": "LogLevel", "type": "string" } @@ -45776,7 +45770,7 @@ }, "AdminCreateUserConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.AdminCreateUserConfig", - "markdownDescription": "The configuration for creating a new user profile.", + "markdownDescription": "The settings for administrator creation of users in a user pool. 
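The `EventSource` / `LogLevel` pairings described above, in a minimal sketch (the pool and log group are placeholders):

```yaml
AWSTemplateFormatVersion: '2010-09-09'
Resources:
  ExampleUserPool:
    Type: AWS::Cognito::UserPool
  PoolLogGroup:
    Type: AWS::Logs::LogGroup
  ExampleLogDelivery:                  # hypothetical logical ID
    Type: AWS::Cognito::LogDeliveryConfiguration
    Properties:
      UserPoolId: !Ref ExampleUserPool
      LogConfigurations:
        - EventSource: userNotification
          LogLevel: ERROR              # message-delivery error logs
          CloudWatchLogsConfiguration:
            LogGroupArn: !GetAtt PoolLogGroup.Arn
```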
Contains settings for allowing user sign-up, customizing invitation messages to new users, and the amount of time before temporary passwords expire.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "title": "AdminCreateUserConfig" }, "AliasAttributes": { @@ -45830,7 +45824,7 @@ }, "LambdaConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.LambdaConfig", - "markdownDescription": "The Lambda trigger configuration information for the new user pool.\n\n> In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you must make an extra call to add permission for these event sources to invoke your Lambda function.\n> \n> For more information on using the Lambda API to add permission, see [AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html) .\n> \n> For adding permission using the AWS CLI , see [add-permission](https://docs.aws.amazon.com/cli/latest/reference/lambda/add-permission.html) .", + "markdownDescription": "A collection of user pool Lambda triggers. Amazon Cognito invokes triggers at several possible stages of authentication operations. Triggers can modify the outcome of the operations that invoked them.", "title": "LambdaConfig" }, "MfaConfiguration": { @@ -45840,7 +45834,7 @@ }, "Policies": { "$ref": "#/definitions/AWS::Cognito::UserPool.Policies", - "markdownDescription": "The policy associated with a user pool.", + "markdownDescription": "A list of user pool policies. Contains the policy that sets password-complexity requirements.\n\nThis data type is a request and response parameter of [CreateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateUserPool.html) and [UpdateUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UpdateUserPool.html) , and a response parameter of [DescribeUserPool](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html) .", "title": "Policies" }, "Schema": { @@ -45907,7 +45901,7 @@ }, "VerificationMessageTemplate": { "$ref": "#/definitions/AWS::Cognito::UserPool.VerificationMessageTemplate", - "markdownDescription": "The template for the verification message that the user sees when the app requests permission to access the user's information.", + "markdownDescription": "The template for the verification message that your user pool delivers to users who set an email address or phone number attribute.\n\nSet the email message type that corresponds to your `DefaultEmailOption` selection. For `CONFIRM_WITH_LINK` , specify an `EmailMessageByLink` and leave `EmailMessage` blank. For `CONFIRM_WITH_CODE` , specify an `EmailMessage` and leave `EmailMessageByLink` blank. When you supply both parameters with either choice, Amazon Cognito returns an error.", "title": "VerificationMessageTemplate" } }, @@ -45951,7 +45945,7 @@ "additionalProperties": false, "properties": { "AllowAdminCreateUserOnly": { - "markdownDescription": "Set to `True` if only the administrator is allowed to create user profiles. 
Set to `False` if users can sign themselves up via an app.", + "markdownDescription": "The setting for allowing self-service sign-up. When `true` , only administrators can create new user profiles. When `false` , users can register themselves and create a new user profile with the [SignUp](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SignUp.html) operation.", "title": "AllowAdminCreateUserOnly", "type": "boolean" }, @@ -45961,7 +45955,7 @@ "title": "InviteMessageTemplate" }, "UnusedAccountValidityDays": { - "markdownDescription": "The user account expiration limit, in days, after which a new account that hasn't signed in is no longer usable. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `\"RESEND\"` for the `MessageAction` parameter. The default value for this parameter is 7.\n\n> If you set a value for `TemporaryPasswordValidityDays` in `PasswordPolicy` , that value will be used, and `UnusedAccountValidityDays` will be no longer be an available parameter for that user pool.", + "markdownDescription": "This parameter is no longer in use. Configure the duration of temporary passwords with the `TemporaryPasswordValidityDays` parameter of [PasswordPolicyType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_PasswordPolicyType.html) . For older user pools that have a `UnusedAccountValidityDays` configuration, that value is effective until you set a value for `TemporaryPasswordValidityDays` .\n\nThe password expiration limit in days for administrator-created users. When this time expires, the user can't sign in with their temporary password. To reset the account after that time limit, you must call `AdminCreateUser` again, specifying `RESEND` for the `MessageAction` parameter.\n\nThe default value for this parameter is 7.", "title": "UnusedAccountValidityDays", "type": "number" } @@ -46072,7 +46066,7 @@ "additionalProperties": false, "properties": { "CreateAuthChallenge": { - "markdownDescription": "Creates an authentication challenge.", + "markdownDescription": "The configuration of a create auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "CreateAuthChallenge", "type": "string" }, @@ -46082,7 +46076,7 @@ "title": "CustomEmailSender" }, "CustomMessage": { - "markdownDescription": "A custom Message AWS Lambda trigger.", + "markdownDescription": "A custom message Lambda trigger. This trigger is an opportunity to customize all SMS and email messages from your user pool. 
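As the revised `AdminCreateUserConfig` and `UnusedAccountValidityDays` descriptions above suggest, self-service sign-up is toggled with `AllowAdminCreateUserOnly`, and temporary-password lifetime now lives in the password policy. A hedged sketch (identifiers are illustrative):

```yaml
MyUserPool:
  Type: AWS::Cognito::UserPool
  Properties:
    UserPoolName: example-pool
    AdminCreateUserConfig:
      AllowAdminCreateUserOnly: true         # false would let users call SignUp themselves
    Policies:
      PasswordPolicy:
        TemporaryPasswordValidityDays: 7     # replaces the legacy UnusedAccountValidityDays
```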
When a custom message trigger is active, your user pool routes all messages to a Lambda function that returns a runtime-customized message subject and body for your user pool to deliver to a user.", "title": "CustomMessage", "type": "string" }, @@ -46092,7 +46086,7 @@ "title": "CustomSMSSender" }, "DefineAuthChallenge": { - "markdownDescription": "Defines the authentication challenge.", + "markdownDescription": "The configuration of a define auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "DefineAuthChallenge", "type": "string" }, @@ -46102,42 +46096,42 @@ "type": "string" }, "PostAuthentication": { - "markdownDescription": "A post-authentication AWS Lambda trigger.", + "markdownDescription": "The configuration of a [post authentication Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-authentication.html) in a user pool. This trigger can take custom actions after a user signs in.", "title": "PostAuthentication", "type": "string" }, "PostConfirmation": { - "markdownDescription": "A post-confirmation AWS Lambda trigger.", + "markdownDescription": "The configuration of a [post confirmation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-post-confirmation.html) in a user pool. This trigger can take custom actions after a user confirms their user account and their email address or phone number.", "title": "PostConfirmation", "type": "string" }, "PreAuthentication": { - "markdownDescription": "A pre-authentication AWS Lambda trigger.", + "markdownDescription": "The configuration of a [pre authentication trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-authentication.html) in a user pool. This trigger can evaluate and modify user sign-in events.", "title": "PreAuthentication", "type": "string" }, "PreSignUp": { - "markdownDescription": "A pre-registration AWS Lambda trigger.", + "markdownDescription": "The configuration of a [pre sign-up Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-sign-up.html) in a user pool. This trigger evaluates new users and can bypass confirmation, [link a federated user profile](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-federation-consolidate-users.html) , or block sign-up requests.", "title": "PreSignUp", "type": "string" }, "PreTokenGeneration": { - "markdownDescription": "The Amazon Resource Name (ARN) of the function that you want to assign to your Lambda trigger.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .\n\nYou can set ``", + "markdownDescription": "The legacy configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool.\n\nSet this parameter for legacy purposes. If you also set an ARN in `PreTokenGenerationConfig` , its value must be identical to `PreTokenGeneration` . 
For new instances of pre token generation triggers, set the `LambdaArn` of `PreTokenGenerationConfig` .", "title": "PreTokenGeneration", "type": "string" }, "PreTokenGenerationConfig": { "$ref": "#/definitions/AWS::Cognito::UserPool.PreTokenGenerationConfig", - "markdownDescription": "The detailed configuration of a pre token generation trigger. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", + "markdownDescription": "The detailed configuration of a [pre token generation Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-pre-token-generation.html) in a user pool. If you also set an ARN in `PreTokenGeneration` , its value must be identical to `PreTokenGenerationConfig` .", "title": "PreTokenGenerationConfig" }, "UserMigration": { - "markdownDescription": "The user migration Lambda config type.", + "markdownDescription": "The configuration of a [migrate user Lambda trigger](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-migrate-user.html) in a user pool. This trigger can create user profiles when users sign in or attempt to reset their password with credentials that don't exist yet.", "title": "UserMigration", "type": "string" }, "VerifyAuthChallengeResponse": { - "markdownDescription": "Verifies the authentication challenge response.", + "markdownDescription": "The configuration of a verify auth challenge Lambda trigger, one of three triggers in the sequence of the [custom authentication challenge triggers](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-lambda-challenge.html) .", "title": "VerifyAuthChallengeResponse", "type": "string" } @@ -46169,22 +46163,22 @@ "type": "number" }, "RequireLowercase": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one lowercase letter in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one lowercase letter in their password.", "title": "RequireLowercase", "type": "boolean" }, "RequireNumbers": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one number in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one number in their password.", "title": "RequireNumbers", "type": "boolean" }, "RequireSymbols": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one symbol in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one symbol in their password.", "title": "RequireSymbols", "type": "boolean" }, "RequireUppercase": { - "markdownDescription": "In the password policy that you have set, refers to whether you have required users to use at least one uppercase letter in their password.", + "markdownDescription": "The requirement in a password policy that users must include at least one uppercase letter in their password.", "title": "RequireUppercase", "type": "boolean" }, @@ -46201,7 +46195,7 @@ "properties": { "PasswordPolicy": { "$ref": "#/definitions/AWS::Cognito::UserPool.PasswordPolicy", - "markdownDescription": "The password policy.", + "markdownDescription": "The password policy settings for a user pool, including complexity, history, and length requirements.", "title": "PasswordPolicy" } 
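The three custom-authentication triggers described above (define, create, verify) are wired up together on the user pool. A minimal sketch, assuming the three Lambda functions already exist under placeholder names:

```yaml
CustomAuthPool:
  Type: AWS::Cognito::UserPool
  Properties:
    UserPoolName: custom-auth-pool
    LambdaConfig:
      # The custom authentication challenge sequence: define -> create -> verify.
      DefineAuthChallenge: !GetAtt DefineAuthChallengeFn.Arn
      CreateAuthChallenge: !GetAtt CreateAuthChallengeFn.Arn
      VerifyAuthChallengeResponse: !GetAtt VerifyAuthChallengeFn.Arn
```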
}, @@ -46310,7 +46304,7 @@ "type": "string" }, "MinLength": { - "markdownDescription": "The minimum length.", + "markdownDescription": "The minimum length of a string attribute value.", "title": "MinLength", "type": "string" } @@ -46338,7 +46332,7 @@ "additionalProperties": false, "properties": { "AdvancedSecurityMode": { - "markdownDescription": "The operating mode of advanced security features in your user pool.", + "markdownDescription": "The operating mode of advanced security features for standard authentication types in your user pool, including username-password and secure remote password (SRP) authentication.", "title": "AdvancedSecurityMode", "type": "string" } @@ -46349,7 +46343,7 @@ "additionalProperties": false, "properties": { "CaseSensitive": { - "markdownDescription": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **True** - Enables case sensitivity for all username input. When this option is set to `True` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **False** - Enables case insensitivity for all username input. For example, when this option is set to `False` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute.", + "markdownDescription": "Specifies whether user name case sensitivity will be applied for all users in the user pool through Amazon Cognito APIs. For most use cases, set case sensitivity to `False` (case insensitive) as a best practice. When usernames and email addresses are case insensitive, users can sign in as the same user when they enter a different capitalization of their user name.\n\nValid values include:\n\n- **true** - Enables case sensitivity for all username input. When this option is set to `true` , users must sign in using the exact capitalization of their given username, such as \u201cUserName\u201d. This is the default value.\n- **false** - Enables case insensitivity for all username input. For example, when this option is set to `false` , users can sign in using `username` , `USERNAME` , or `UserName` . This option also enables both `preferred_username` and `email` alias to be case insensitive, in addition to the `username` attribute.", "title": "CaseSensitive", "type": "boolean" } @@ -46360,7 +46354,7 @@ "additionalProperties": false, "properties": { "DefaultEmailOption": { - "markdownDescription": "The default email option.", + "markdownDescription": "The configuration of verification emails to contain a clickable link or a verification code.\n\nFor link, your template body must contain link text in the format `{##Click here##}` . \"Click here\" in the example is a customizable string. For code, your template body must contain a code placeholder in the format `{####}` .", "title": "DefaultEmailOption", "type": "string" }, @@ -46526,7 +46520,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of user attributes that you want your app client to have read-only access to. 
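The `DefaultEmailOption` description above spells out the placeholder contract: `{####}` for code-based confirmation, `{##Click here##}` for link-based. A sketch combining it with the password-policy requirements (values are illustrative):

```yaml
PolicyDemoPool:
  Type: AWS::Cognito::UserPool
  Properties:
    UserPoolName: policy-demo-pool
    Policies:
      PasswordPolicy:
        MinimumLength: 12
        RequireLowercase: true
        RequireNumbers: true
        RequireSymbols: true
        RequireUppercase: true
    VerificationMessageTemplate:
      DefaultEmailOption: CONFIRM_WITH_CODE
      EmailMessage: "Your verification code is {####}."   # {####} is required for code delivery
      EmailSubject: "Verify your email"
```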
After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", + "markdownDescription": "The list of user attributes that you want your app client to have read access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An example of this kind of activity is when your user selects a link to view their profile information. Your app makes a [GetUser](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_GetUser.html) API request to retrieve and display your user's profile data.\n\nWhen you don't specify the `ReadAttributes` for your app client, your app can read the values of `email_verified` , `phone_number_verified` , and the Standard attributes of your user pool. When your user pool app client has read access to these default attributes, `ReadAttributes` doesn't return any information. Amazon Cognito only populates `ReadAttributes` in the API response if you have specified your own custom set of read attributes.", "title": "ReadAttributes", "type": "array" }, @@ -46597,17 +46591,17 @@ "type": "string" }, "ApplicationId": { - "markdownDescription": "The application ID for an Amazon Pinpoint application.", + "markdownDescription": "Your Amazon Pinpoint project ID.", "title": "ApplicationId", "type": "string" }, "ExternalId": { - "markdownDescription": "The external ID.", + "markdownDescription": "The [external ID](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) of the role that Amazon Cognito assumes to send analytics data to Amazon Pinpoint.", "title": "ExternalId", "type": "string" }, "RoleArn": { - "markdownDescription": "The ARN of an AWS Identity and Access Management role that authorizes Amazon Cognito to publish events to Amazon Pinpoint analytics.", + "markdownDescription": "The ARN of an AWS Identity and Access Management role that has the permissions required for Amazon Cognito to publish events to Amazon Pinpoint analytics.", "title": "RoleArn", "type": "string" }, @@ -46623,17 +46617,17 @@ "additionalProperties": false, "properties": { "AccessToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is hours. `AccessTokenValidity` duration can range from five minutes to one day.", + "markdownDescription": "A time unit for the value that you set in the `AccessTokenValidity` parameter. The default `AccessTokenValidity` time unit is `hours` . 
`AccessTokenValidity` duration can range from five minutes to one day.", "title": "AccessToken", "type": "string" }, "IdToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is hours. `IdTokenValidity` duration can range from five minutes to one day.", + "markdownDescription": "A time unit for the value that you set in the `IdTokenValidity` parameter. The default `IdTokenValidity` time unit is `hours` . `IdTokenValidity` duration can range from five minutes to one day.", "title": "IdToken", "type": "string" }, "RefreshToken": { - "markdownDescription": "A time unit of `seconds` , `minutes` , `hours` , or `days` for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is days. `RefreshTokenValidity` duration can range from 60 minutes to 10 years.", + "markdownDescription": "A time unit for the value that you set in the `RefreshTokenValidity` parameter. The default `RefreshTokenValidity` time unit is `days` . `RefreshTokenValidity` duration can range from 60 minutes to 10 years.", "title": "RefreshToken", "type": "string" } @@ -47004,12 +46998,12 @@ "additionalProperties": false, "properties": { "ScopeDescription": { - "markdownDescription": "A description of the scope.", + "markdownDescription": "A friendly description of a custom scope.", "title": "ScopeDescription", "type": "string" }, "ScopeName": { - "markdownDescription": "The name of the scope.", + "markdownDescription": "The name of the scope. Amazon Cognito renders custom scopes in the format `resourceServerIdentifier/ScopeName` . For example, if this parameter is `exampleScope` in the resource server with the identifier `exampleResourceServer` , you request and receive the scope `exampleResourceServer/exampleScope` .", "title": "ScopeName", "type": "string" } @@ -47057,7 +47051,7 @@ "properties": { "AccountTakeoverRiskConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverRiskConfigurationType", - "markdownDescription": "The account takeover risk configuration object, including the `NotifyConfiguration` object and `Actions` to take if there is an account takeover.", + "markdownDescription": "The settings for automated responses and notification templates for adaptive authentication with advanced security features.", "title": "AccountTakeoverRiskConfiguration" }, "ClientId": { @@ -47067,16 +47061,16 @@ }, "CompromisedCredentialsRiskConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.CompromisedCredentialsRiskConfigurationType", - "markdownDescription": "The compromised credentials risk configuration object, including the `EventFilter` and the `EventAction` .", + "markdownDescription": "Settings for compromised-credentials actions and authentication types with advanced security features in full-function `ENFORCED` mode.", "title": "CompromisedCredentialsRiskConfiguration" }, "RiskExceptionConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.RiskExceptionConfigurationType", - "markdownDescription": "The configuration to override the risk decision.", + "markdownDescription": "Exceptions to the risk evaluation configuration, including always-allow and always-block IP address ranges.", "title": "RiskExceptionConfiguration" }, "UserPoolId": { - "markdownDescription": "The user pool ID.", + "markdownDescription": "The 
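The `TokenValidityUnits` hunks above decouple each validity number from its unit. Under the stated defaults (`hours` for access and ID tokens, `days` for refresh tokens), a client like the following sketch issues 8-hour access tokens and 30-day refresh tokens (names are placeholders):

```yaml
ExampleAppClient:
  Type: AWS::Cognito::UserPoolClient
  Properties:
    UserPoolId: !Ref MyUserPool
    ClientName: example-client
    AccessTokenValidity: 8     # 8 hours, per the unit below
    IdTokenValidity: 8
    RefreshTokenValidity: 30   # 30 days
    TokenValidityUnits:
      AccessToken: hours
      IdToken: hours
      RefreshToken: days
```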
ID of the user pool that has the risk configuration applied.", "title": "UserPoolId", "type": "string" } @@ -47112,12 +47106,12 @@ "additionalProperties": false, "properties": { "EventAction": { - "markdownDescription": "The action to take in response to the account takeover action. Valid values are as follows:\n\n- `BLOCK` Choosing this action will block the request.\n- `MFA_IF_CONFIGURED` Present an MFA challenge if user has configured it, else allow the request.\n- `MFA_REQUIRED` Present an MFA challenge if user has configured it, else block the request.\n- `NO_ACTION` Allow the user to sign in.", + "markdownDescription": "The action to take for the attempted account takeover action for the associated risk level. Valid values are as follows:\n\n- `BLOCK` : Block the request.\n- `MFA_IF_CONFIGURED` : Present an MFA challenge if possible. MFA is possible if the user pool has active MFA methods that the user can set up. For example, if the user pool only supports SMS message MFA but the user doesn't have a phone number attribute, MFA setup isn't possible. If MFA setup isn't possible, allow the request.\n- `MFA_REQUIRED` : Present an MFA challenge if possible. Block the request if a user hasn't set up MFA. To sign in with required MFA, users must have an email address or phone number attribute, or a registered TOTP factor.\n- `NO_ACTION` : Take no action. Permit sign-in.", "title": "EventAction", "type": "string" }, "Notify": { - "markdownDescription": "Flag specifying whether to send a notification.", + "markdownDescription": "Determines whether Amazon Cognito sends a user a notification message when your user pool assesses a user's session at the associated risk level.", "title": "Notify", "type": "boolean" } @@ -47133,17 -47127,17 @@ "properties": { "HighAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a high risk.", + "markdownDescription": "The action that you assign to a high-risk assessment by advanced security features.", "title": "HighAction" }, "LowAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a low risk.", + "markdownDescription": "The action that you assign to a low-risk assessment by advanced security features.", "title": "LowAction" }, "MediumAction": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionType", - "markdownDescription": "Action to take for a medium risk.", + "markdownDescription": "The action that you assign to a medium-risk assessment by advanced security features.", "title": "MediumAction" } }, @@ -47154,12 +47148,12 @@ "properties": { "Actions": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.AccountTakeoverActionsType", - "markdownDescription": "Account takeover risk configuration actions.", + "markdownDescription": "A list of account-takeover actions for each level of risk that Amazon Cognito might assess with advanced security features.", "title": "Actions" }, "NotifyConfiguration": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyConfigurationType", - "markdownDescription": "The notify configuration used to construct email notifications.", + "markdownDescription": "The settings for composing and sending an email message when advanced security features assesses a risk level with adaptive authentication. 
When you choose to notify users in `AccountTakeoverRiskConfiguration` , Amazon Cognito sends an email message using the method and template that you set with this data type.", "title": "NotifyConfiguration" } }, @@ -47172,7 +47166,7 @@ "additionalProperties": false, "properties": { "EventAction": { - "markdownDescription": "The event action.", + "markdownDescription": "The action that Amazon Cognito takes when it detects compromised credentials.", "title": "EventAction", "type": "string" } @@ -47187,14 +47181,14 @@ "properties": { "Actions": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.CompromisedCredentialsActionsType", - "markdownDescription": "The compromised credentials risk configuration actions.", + "markdownDescription": "Settings for the actions that you want your user pool to take when Amazon Cognito detects compromised credentials.", "title": "Actions" }, "EventFilter": { "items": { "type": "string" }, - "markdownDescription": "Perform the action for these events. The default is to perform all events if no event filter is specified.", + "markdownDescription": "Settings for the sign-in activity where you want to configure compromised-credentials actions. Defaults to all events.", "title": "EventFilter", "type": "array" } @@ -47209,26 +47203,26 @@ "properties": { "BlockEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "Email template used when a detected risk event is blocked.", + "markdownDescription": "The template for the email message that your user pool sends when a detected risk event is blocked.", "title": "BlockEmail" }, "From": { - "markdownDescription": "The email address that is sending the email. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", + "markdownDescription": "The email address that sends the email message. The address must be either individually verified with Amazon Simple Email Service, or from a domain that has been verified with Amazon SES.", "title": "From", "type": "string" }, "MfaEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "The multi-factor authentication (MFA) email template used when MFA is challenged as part of a detected risk.", + "markdownDescription": "The template for the email message that your user pool sends when MFA is challenged in response to a detected risk.", "title": "MfaEmail" }, "NoActionEmail": { "$ref": "#/definitions/AWS::Cognito::UserPoolRiskConfigurationAttachment.NotifyEmailType", - "markdownDescription": "The email template used when a detected risk event is allowed.", + "markdownDescription": "The template for the email message that your user pool sends when no action is taken in response to a detected risk.", "title": "NoActionEmail" }, "ReplyTo": { - "markdownDescription": "The destination to which the receiver of an email should reply to.", + "markdownDescription": "The reply-to email address of an email template.", "title": "ReplyTo", "type": "string" }, @@ -47247,17 +47241,17 @@ "additionalProperties": false, "properties": { "HtmlBody": { - "markdownDescription": "The email HTML body.", + "markdownDescription": "The body of an email notification formatted in HTML. 
Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively.", "title": "HtmlBody", "type": "string" }, "Subject": { - "markdownDescription": "The email subject.", + "markdownDescription": "The subject of the threat protection email notification.", "title": "Subject", "type": "string" }, "TextBody": { - "markdownDescription": "The email text body.", + "markdownDescription": "The body of an email notification formatted in plaintext. Choose an `HtmlBody` or a `TextBody` to send an HTML-formatted or plaintext message, respectively.", "title": "TextBody", "type": "string" } @@ -47274,7 +47268,7 @@ "items": { "type": "string" }, - "markdownDescription": "Overrides the risk decision to always block the pre-authentication requests. The IP range is in CIDR notation, a compact representation of an IP address and its routing prefix.", + "markdownDescription": "An always-block IP address list. Overrides the risk decision and always blocks authentication requests. This parameter is displayed and set in CIDR notation.", "title": "BlockedIPRangeList", "type": "array" }, @@ -47282,7 +47276,7 @@ "items": { "type": "string" }, - "markdownDescription": "Risk detection isn't performed on the IP addresses in this range list. The IP range is in CIDR notation.", + "markdownDescription": "An always-allow IP address list. Risk detection isn't performed on the IP addresses in this range list. This parameter is displayed and set in CIDR notation.", "title": "SkippedIPRangeList", "type": "array" } @@ -61701,32 +61695,32 @@ "additionalProperties": false, "properties": { "AccessPointArn": { - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to access the Amazon EFS file system.", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the access point that DataSync uses to mount your Amazon EFS file system.\n\nFor more information, see [Accessing restricted file systems](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam) .", "title": "AccessPointArn", "type": "string" }, "Ec2Config": { "$ref": "#/definitions/AWS::DataSync::LocationEFS.Ec2Config", - "markdownDescription": "Specifies the subnet and security groups DataSync uses to access your Amazon EFS file system.", + "markdownDescription": "Specifies the subnet and security groups DataSync uses to connect to one of your Amazon EFS file system's [mount targets](https://docs.aws.amazon.com/efs/latest/ug/accessing-fs.html) .", "title": "Ec2Config" }, "EfsFilesystemArn": { - "markdownDescription": "Specifies the ARN for the Amazon EFS file system.", + "markdownDescription": "Specifies the ARN for your Amazon EFS file system.", "title": "EfsFilesystemArn", "type": "string" }, "FileSystemAccessRoleArn": { - "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that DataSync assumes when mounting the Amazon EFS file system.", + "markdownDescription": "Specifies an AWS Identity and Access Management (IAM) role that allows DataSync to access your Amazon EFS file system.\n\nFor information on creating this role, see [Creating a DataSync IAM role for file system access](https://docs.aws.amazon.com/datasync/latest/userguide/create-efs-location.html#create-efs-location-iam-role) .", "title": "FileSystemAccessRoleArn", "type": "string" }, "InTransitEncryption": { - "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it copies data 
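Several of the risk-configuration hunks above come together on one attachment resource: compromised-credential actions plus always-allow and always-block CIDR lists. A hedged sketch (CIDRs and names are illustrative only):

```yaml
PoolRiskConfig:
  Type: AWS::Cognito::UserPoolRiskConfigurationAttachment
  Properties:
    UserPoolId: !Ref MyUserPool
    ClientId: ALL                      # apply pool-wide rather than per app client
    CompromisedCredentialsRiskConfiguration:
      EventFilter:
        - SIGN_IN                      # only act on sign-in events
      Actions:
        EventAction: BLOCK
    RiskExceptionConfiguration:
      BlockedIPRangeList:
        - 198.51.100.0/24              # always block
      SkippedIPRangeList:
        - 192.0.2.0/24                 # never risk-evaluate
```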
to or from the Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", + "markdownDescription": "Specifies whether you want DataSync to use Transport Layer Security (TLS) 1.2 encryption when it transfers data to or from your Amazon EFS file system.\n\nIf you specify an access point using `AccessPointArn` or an IAM role using `FileSystemAccessRoleArn` , you must set this parameter to `TLS1_2` .", "title": "InTransitEncryption", "type": "string" }, "Subdirectory": { - "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location). By default, DataSync uses the root directory, but you can also include subdirectories.\n\n> You must specify a value with forward slashes (for example, `/path/to/folder` ).", + "markdownDescription": "Specifies a mount path for your Amazon EFS file system. This is where DataSync reads or writes data (depending on if this is a source or destination location) on your file system.\n\nBy default, DataSync uses the root directory (or [access point](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) if you provide one by using `AccessPointArn` ). You can also include subdirectories using forward slashes (for example, `/path/to/folder` ).", "title": "Subdirectory", "type": "string" }, @@ -62326,7 +62320,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents that are used to connect to the HDFS cluster.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your HDFS cluster.", "title": "AgentArns", "type": "array" }, @@ -62503,7 +62497,7 @@ }, "OnPremConfig": { "$ref": "#/definitions/AWS::DataSync::LocationNFS.OnPremConfig", - "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that want to connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple agents for transfers](https://docs.aws.amazon.com/datasync/latest/userguide/multiple-agents.html) .", + "markdownDescription": "Specifies the Amazon Resource Name (ARN) of the DataSync agent that can connect to your NFS file server.\n\nYou can specify more than one agent. For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "OnPremConfig" }, "ServerHostname": { @@ -62569,7 +62563,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon Resource Names (ARNs) of the agents connecting to a transfer location.", + "markdownDescription": "The Amazon Resource Names (ARNs) of the DataSync agents that can connect to your NFS file server.\n\nYou can specify more than one agent. 
For more information, see [Using multiple DataSync agents](https://docs.aws.amazon.com/datasync/latest/userguide/do-i-need-datasync-agent.html#multiple-agents) .", "title": "AgentArns", "type": "array" } @@ -62623,7 +62617,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can securely connect with your location.", + "markdownDescription": "Specifies the Amazon Resource Names (ARNs) of the DataSync agents that can connect with your object storage system.", "title": "AgentArns", "type": "array" }, @@ -63469,7 +63463,7 @@ "additionalProperties": false, "properties": { "ScheduleExpression": { - "markdownDescription": "Specifies your task schedule by using a cron expression in UTC time. For information about cron expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-cron-expressions.html) .", + "markdownDescription": "Specifies your task schedule by using a cron or rate expression.\n\nUse cron expressions for task schedules that run on a specific time and day. For example, the following cron expression creates a task schedule that runs at 8 AM on the first Wednesday of every month:\n\n`cron(0 8 * * 3#1)`\n\nUse rate expressions for task schedules that run on a regular interval. For example, the following rate expression creates a task schedule that runs every 12 hours:\n\n`rate(12 hours)`\n\nFor information about cron and rate expression syntax, see the [*Amazon EventBridge User Guide*](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-scheduled-rule-pattern.html) .", "title": "ScheduleExpression", "type": "string" }, @@ -64622,7 +64616,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the farm.", + "markdownDescription": "The display name of the farm.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -64704,7 +64698,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the fleet summary to update.", + "markdownDescription": "The display name of the fleet summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65351,7 +65345,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name of the queue summary to update.", + "markdownDescription": "The display name of the queue summary to update.\n\n> This field can store any content. Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -65700,7 +65694,7 @@ "additionalProperties": false, "properties": { "DisplayName": { - "markdownDescription": "The display name of the storage profile summary to update.", + "markdownDescription": "The display name of the storage profile summary to update.\n\n> This field can store any content. 
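The new `ScheduleExpression` text above documents both cron and rate forms. A sketch of the rate form on a task, assuming the two location ARNs are supplied as template parameters:

```yaml
NightlySyncTask:
  Type: AWS::DataSync::Task
  Properties:
    SourceLocationArn: !Ref SourceLocationArn        # template parameters (placeholders)
    DestinationLocationArn: !Ref DestinationLocationArn
    Schedule:
      # rate() runs on a fixed interval; cron(0 8 * * 3#1) would instead run
      # at 8 AM on the first Wednesday of every month, per the hunk above.
      ScheduleExpression: rate(12 hours)
```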
Escape or encode this content before displaying it on a webpage or any other system that might interpret the content of this field.", "title": "DisplayName", "type": "string" }, @@ -66593,7 +66587,7 @@ "type": "number" }, "CopyTagsToSnapshot": { - "markdownDescription": "", + "markdownDescription": "Set to `true` to copy all tags from the source cluster snapshot to the target cluster snapshot, and otherwise `false` . The default is `false` .", "title": "CopyTagsToSnapshot", "type": "boolean" }, @@ -66626,7 +66620,7 @@ "type": "array" }, "EngineVersion": { - "markdownDescription": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.", + "markdownDescription": "The version number of the database engine to use. The `--engine-version` will default to the latest major engine version. For production workloads, we recommend explicitly declaring this parameter with the intended major engine version.\n\nChanging the `EngineVersion` will start an in-place engine version upgrade. Note that in-place engine version upgrade will cause downtime in the cluster. See [Amazon DocumentDB in-place major version upgrade](https://docs.aws.amazon.com/documentdb/latest/developerguide/docdb-mvu.html) before starting an in-place engine version upgrade.", "title": "EngineVersion", "type": "string" }, @@ -66681,7 +66675,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "Specifies whether the cluster is encrypted.", + "markdownDescription": "Specifies whether the cluster is encrypted.\n\nIf you specify `SourceDBClusterIdentifier` or `SnapshotIdentifier` and don\u2019t specify `StorageEncrypted` , the encryption property is inherited from the source cluster or snapshot (unless `KMSKeyId` is specified, in which case the restored cluster will be encrypted with that KMS key). If the source is encrypted and `StorageEncrypted` is specified to be true, the restored cluster will be encrypted (if you want to use a different KMS key, specify the `KMSKeyId` property as well). If the source is unencrypted and `StorageEncrypted` is specified to be true, then the `KMSKeyId` property must be specified. If the source is encrypted, don\u2019t specify `StorageEncrypted` to be false as opting out of encryption is not allowed.", "title": "StorageEncrypted", "type": "boolean" }, @@ -66872,7 +66866,7 @@ "type": "string" }, "CACertificateIdentifier": { - "markdownDescription": "The CA certificate identifier to use for the DB instance's server certificate.\n\nFor more information, see [Updating Your Amazon DocumentDB TLS Certificates](https://docs.aws.amazon.com/documentdb/latest/developerguide/ca_cert_rotation.html) and [Encrypting Data in Transit](https://docs.aws.amazon.com/documentdb/latest/developerguide/security.encryption.ssl.html) in the *Amazon DocumentDB Developer Guide* .", + "markdownDescription": "The identifier of the CA certificate for this DB instance.", "title": "CACertificateIdentifier", "type": "string" }, @@ -69794,7 +69788,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). 
However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -71163,7 +71157,7 @@ "type": "string" }, "Locale": { - "markdownDescription": "The locale of the IPAM pool. In IPAM, the locale is the AWS Region where you want to make an IPAM pool available for allocations. Only resources in the same Region as the locale of the pool can get IP address allocations from the pool. You can only allocate a CIDR for a VPC, for example, from an IPAM pool that shares a locale with the VPC\u2019s Region. Note that once you choose a Locale for a pool, you cannot modify it. If you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", + "markdownDescription": "The locale of the IPAM pool.\n\nThe locale for the pool should be one of the following:\n\n- An AWS Region where you want this IPAM pool to be available for allocations.\n- The network border group for an AWS Local Zone where you want this IPAM pool to be available for allocations ( [supported Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html#byoip-zone-avail) ). This option is only available for IPAM IPv4 pools in the public scope.\n\nIf you choose an AWS Region for locale that has not been configured as an operating Region for the IPAM, you'll get an error.", "title": "Locale", "type": "string" }, @@ -72593,7 +72587,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.LaunchTemplateTagSpecification" }, - "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for the resources that are created when an instance is launched, you must use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to the launch template on creation. To tag the launch template, the resource type must be `launch-template` .\n\nTo specify the tags for resources that are created during instance launch, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html#cfn-ec2-launchtemplate-launchtemplatedata-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -73182,7 +73176,7 @@ "title": "IamInstanceProfile" }, "ImageId": { - "markdownDescription": "The ID of the AMI. 
Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-17characters00000`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", + "markdownDescription": "The ID of the AMI. Alternatively, you can specify a Systems Manager parameter, which will resolve to an AMI ID on launch.\n\nValid formats:\n\n- `ami-0ac394d6a3example`\n- `resolve:ssm:parameter-name`\n- `resolve:ssm:parameter-name:version-number`\n- `resolve:ssm:parameter-name:label`\n\nFor more information, see [Use a Systems Manager parameter to find an AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html#using-systems-manager-parameter-to-find-AMI) in the *Amazon Elastic Compute Cloud User Guide* .", "title": "ImageId", "type": "string" }, @@ -73282,7 +73276,7 @@ "items": { "$ref": "#/definitions/AWS::EC2::LaunchTemplate.TagSpecification" }, - "markdownDescription": "The tags to apply to the resources that are created during instance launch.\n\nTo tag a resource after it has been created, see [CreateTags](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html) .\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", + "markdownDescription": "The tags to apply to resources that are created during instance launch.\n\nTo tag the launch template itself, use [TagSpecifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-launchtemplate.html#cfn-ec2-launchtemplate-tagspecifications) .", "title": "TagSpecifications", "type": "array" }, @@ -74142,7 +74136,7 @@ "items": { "type": "string" }, - "markdownDescription": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-creating) in the *Amazon VPC User Guide* .", + "markdownDescription": "Secondary EIP allocation IDs. For more information, see [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-working-with.html) in the *Amazon VPC User Guide* .", "title": "SecondaryAllocationIds", "type": "array" }, @@ -75020,7 +75014,7 @@ "type": "string" }, "destinationPrefixListId": { - "markdownDescription": "The prefix of the AWS service .", + "markdownDescription": "The prefix of the AWS service.", "title": "destinationPrefixListId", "type": "string" }, @@ -76315,12 +76309,12 @@ "items": { "$ref": "#/definitions/AWS::EC2::PrefixList.Entry" }, - "markdownDescription": "One or more entries for the prefix list.", + "markdownDescription": "The entries for the prefix list.", "title": "Entries", "type": "array" }, "MaxEntries": { - "markdownDescription": "The maximum number of entries for the prefix list.", + "markdownDescription": "The maximum number of entries for the prefix list. 
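The `ImageId` formats listed above include SSM resolution, and the corrected `TagSpecifications` links distinguish tagging the template itself from tagging launched resources. A sketch of both (the SSM parameter path is the public Amazon Linux 2023 alias; other names are placeholders):

```yaml
ExampleLaunchTemplate:
  Type: AWS::EC2::LaunchTemplate
  Properties:
    LaunchTemplateName: example-template
    LaunchTemplateData:
      InstanceType: t3.micro
      # Resolves to the current AL2023 AMI ID at launch.
      ImageId: resolve:ssm:/aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64
      TagSpecifications:
        - ResourceType: instance       # tags instances created from this template
          Tags:
            - Key: app
              Value: example
    TagSpecifications:
      - ResourceType: launch-template  # tags the launch template itself
        Tags:
          - Key: app
            Value: example
```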
This property is required when you create a prefix list.", "title": "MaxEntries", "type": "number" }, @@ -77649,7 +77643,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -77872,7 +77866,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.", + "markdownDescription": "The number of units provided by the specified instance type. These are the same units that you chose to set the target capacity in terms of instances, or a performance characteristic such as vCPUs, memory, or I/O.\n\nIf the target capacity divided by this value is not a whole number, Amazon EC2 rounds the number of instances to the next whole number. If this value is not specified, the default is 1.\n\n> When specifying weights, the price used in the `lowestPrice` and `priceCapacityOptimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -78197,7 +78191,7 @@ "type": "string" }, "EnableDns64": { - "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. For more information, see [DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html#nat-gateway-nat64-dns64) in the *Amazon Virtual Private Cloud User Guide* .", + "markdownDescription": "Indicates whether DNS queries made to the Amazon-provided DNS Resolver in this subnet should return synthetic IPv6 addresses for IPv4-only destinations.\n\n> You must first configure a NAT gateway in a public subnet (separate from the subnet containing the IPv6-only workloads). 
For example, the subnet containing the NAT gateway should have a `0.0.0.0/0` route pointing to the internet gateway. For more information, see [Configure DNS64 and NAT64](https://docs.aws.amazon.com/vpc/latest/userguide/nat-gateway-nat64-dns64.html#nat-gateway-nat64-dns64-walkthrough) in the *Amazon Virtual Private Cloud User Guide* .", "title": "EnableDns64", "type": "boolean" }, @@ -78225,8 +78219,6 @@ "items": { "type": "string" }, - "markdownDescription": "The IPv6 network ranges for the subnet, in CIDR notation.", - "title": "Ipv6CidrBlocks", "type": "array" }, "Ipv6IpamPoolId": { @@ -80489,7 +80481,7 @@ "additionalProperties": false, "properties": { "PolicyDocument": { - "markdownDescription": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. AWS CloudFormation converts YAML policies to JSON format before calling the API to create or modify the VPC endpoint.", + "markdownDescription": "An endpoint policy, which controls access to the service from the VPC. The default endpoint policy allows full access to the service. Endpoint policies are supported only for gateway and interface endpoints.\n\nFor CloudFormation templates in YAML, you can provide the policy in JSON or YAML format. For example, if you have a JSON policy, you can convert it to YAML before including it in the YAML template, and AWS CloudFormation converts the policy to JSON format before calling the API actions for AWS PrivateLink . Alternatively, you can include the JSON directly in the YAML, as shown in the following `Properties` section:\n\n`Properties: VpcEndpointType: 'Interface' ServiceName: !Sub 'com.amazonaws.${AWS::Region}.logs' PolicyDocument: '{ \"Version\":\"2012-10-17\", \"Statement\": [{ \"Effect\":\"Allow\", \"Principal\":\"*\", \"Action\":[\"logs:Describe*\",\"logs:Get*\",\"logs:List*\",\"logs:FilterLogEvents\"], \"Resource\":\"*\" }] }'`", "title": "PolicyDocument", "type": "object" }, @@ -82778,7 +82770,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. 
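The revised `PolicyDocument` description above embeds the endpoint policy as a JSON string; in a YAML template the same policy can equally be written natively. A sketch mirroring the service and actions from that hunk (the VPC reference is a placeholder):

```yaml
LogsEndpoint:
  Type: AWS::EC2::VPCEndpoint
  Properties:
    VpcId: !Ref MyVpc                  # placeholder VPC reference
    VpcEndpointType: Interface
    ServiceName: !Sub "com.amazonaws.${AWS::Region}.logs"
    PolicyDocument:
      Version: "2012-10-17"
      Statement:
        - Effect: Allow
          Principal: "*"
          Action:
            - logs:Describe*
            - logs:Get*
            - logs:List*
            - logs:FilterLogEvents
          Resource: "*"
```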
For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "title": "EncryptionType", "type": "string" }, @@ -82859,37 +82851,37 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION.", "title": "AppliedFor", "type": "array" }, "Description": { - "markdownDescription": "", + "markdownDescription": "The description associated with the repository creation template.", "title": "Description", "type": "string" }, "EncryptionConfiguration": { "$ref": "#/definitions/AWS::ECR::RepositoryCreationTemplate.EncryptionConfiguration", - "markdownDescription": "The encryption configuration for the repository. This determines how the contents of your repository are encrypted at rest.\n\nBy default, when no encryption configuration is set or the `AES256` encryption type is used, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts your data at rest using an AES-256 encryption algorithm. This does not require any action on your part.\n\nFor more control over the encryption of the contents of your repository, you can use server-side encryption with AWS Key Management Service key stored in AWS Key Management Service ( AWS KMS ) to encrypt your images. For more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", + "markdownDescription": "The encryption configuration associated with the repository creation template.", "title": "EncryptionConfiguration" }, "ImageTagMutability": { - "markdownDescription": "", + "markdownDescription": "The tag mutability setting for the repository. If this parameter is omitted, the default setting of MUTABLE will be used, which will allow image tags to be overwritten. 
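For reference, a minimal sketch of how the repository `EncryptionConfiguration` described in the hunk above is set; the repository name and the `RepoKey` KMS key are placeholders:

```yaml
Resources:
  EncryptedRepo:
    Type: AWS::ECR::Repository
    Properties:
      RepositoryName: my-encrypted-repo  # placeholder name
      EncryptionConfiguration:
        EncryptionType: KMS   # AES256 and KMS_DSSE are the other documented types
        KmsKey: !Ref RepoKey  # hypothetical AWS::KMS::Key; omit to use the AWS managed key
```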
If IMMUTABLE is specified, all image tags within the repository will be immutable, which will prevent them from being overwritten.", "title": "ImageTagMutability", "type": "string" }, "LifecyclePolicy": { - "markdownDescription": "", + "markdownDescription": "The lifecycle policy to use for repositories created using the template.", "title": "LifecyclePolicy", "type": "string" }, "Prefix": { - "markdownDescription": "", + "markdownDescription": "The repository namespace prefix associated with the repository creation template.", "title": "Prefix", "type": "string" }, "RepositoryPolicy": { - "markdownDescription": "", + "markdownDescription": "The repository policy to apply to repositories created using the template. A repository policy is a permissions policy associated with a repository to control access permissions.", "title": "RepositoryPolicy", "type": "string" }, @@ -82897,7 +82889,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tags attached to the resource.", + "markdownDescription": "The metadata to apply to the repository to help you categorize and organize them. Each tag consists of a key and an optional value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.", "title": "ResourceTags", "type": "array" } @@ -82933,7 +82925,7 @@ "additionalProperties": false, "properties": { "EncryptionType": { - "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created. For more information, see [Protecting data using server-side encryption with an AWS KMS key stored in AWS Key Management Service (SSE-KMS)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES-256 encryption algorithm. For more information, see [Protecting data using server-side encryption with Amazon S3-managed encryption keys (SSE-S3)](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) in the *Amazon Simple Storage Service Console Developer Guide* .", + "markdownDescription": "The encryption type to use.\n\nIf you use the `KMS` encryption type, the contents of the repository will be encrypted using server-side encryption with AWS Key Management Service key stored in AWS KMS . When you use AWS KMS to encrypt your data, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you already created.\n\nIf you use the `KMS_DSSE` encryption type, the contents of the repository will be encrypted with two layers of encryption using server-side encryption with the AWS KMS Management Service key stored in AWS KMS . 
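Pulling the `AWS::ECR::RepositoryCreationTemplate` properties documented above together, a minimal hypothetical template resource might look like this (the prefix, description, and tags are placeholders):

```yaml
Resources:
  PullThroughDefaults:
    Type: AWS::ECR::RepositoryCreationTemplate
    Properties:
      Prefix: ecr-public  # repository namespace the template applies to
      Description: Defaults for pull-through-cache repositories
      AppliedFor:
        - PULL_THROUGH_CACHE  # the other supported scenario is REPLICATION
      ImageTagMutability: IMMUTABLE  # tags cannot be overwritten once pushed
      ResourceTags:
        - Key: team
          Value: platform
```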
Similar to the `KMS` encryption type, you can either use the default AWS managed AWS KMS key for Amazon ECR, or specify your own AWS KMS key, which you've already created.\n\nIf you use the `AES256` encryption type, Amazon ECR uses server-side encryption with Amazon S3-managed encryption keys which encrypts the images in the repository using an AES256 encryption algorithm.\n\nFor more information, see [Amazon ECR encryption at rest](https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) in the *Amazon Elastic Container Registry User Guide* .", "title": "EncryptionType", "type": "string" }, @@ -83066,7 +83058,7 @@ "type": "number" }, "MaximumScalingStepSize": { - "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale in process is not affected by this parameter. If this parameter is omitted, the default value of `10000` is used.", + "markdownDescription": "The maximum number of Amazon EC2 instances that Amazon ECS will scale out at one time. If this parameter is omitted, the default value of `10000` is used.", "title": "MaximumScalingStepSize", "type": "number" }, @@ -83698,7 +83690,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -83706,7 +83698,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -83797,12 +83789,12 @@ "title": "DeploymentCircuitBreaker" }, "MaximumPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). 
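The subnet and security-group limits in the two descriptions above apply to the `AwsvpcConfiguration` property of an ECS service's network configuration; a minimal sketch in which the subnet and security-group references are hypothetical resources from the same template:

```yaml
# Inside an AWS::ECS::Service definition
NetworkConfiguration:
  AwsvpcConfiguration:
    AssignPublicIp: DISABLED
    Subnets:  # up to 16, all from the same VPC
      - !Ref PrivateSubnetA
      - !Ref PrivateSubnetB
    SecurityGroups:  # up to 5, all from the same VPC
      - !Ref ServiceSecurityGroup
```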
The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and tasks that use the EC2 launch type, the *maximum percent* value is set to the default value and is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `maximumPercent` parameter represents an upper limit on the number of your service's tasks that are allowed in the `RUNNING` or `PENDING` state during a deployment, as a percentage of the `desiredCount` (rounded down to the nearest integer). This parameter enables you to define the deployment batch size. For example, if your service is using the `REPLICA` service scheduler and has a `desiredCount` of four tasks and a `maximumPercent` value of 200%, the scheduler may start four new tasks before stopping the four older tasks (provided that the cluster resources required to do this are available). The default `maximumPercent` value for a service using the `REPLICA` service scheduler is 200%.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types, and tasks in the service use the EC2 launch type, the *maximum percent* value is set to the default value. The *maximum percent* value is used to define the upper limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `maximumPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf the tasks in the service use the Fargate launch type, the maximum percent value is not used, although it is returned when describing your service.", "title": "MaximumPercent", "type": "number" }, "MinimumHealthyPercent": { - "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. 
A task is considered healthy when all essential containers within the task have passed their health checks. The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value and is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state. If a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", + "markdownDescription": "If a service is using the rolling update ( `ECS` ) deployment type, the `minimumHealthyPercent` represents a lower limit on the number of your service's tasks that must remain in the `RUNNING` state during a deployment, as a percentage of the `desiredCount` (rounded up to the nearest integer). This parameter enables you to deploy without using additional cluster capacity. For example, if your service has a `desiredCount` of four tasks and a `minimumHealthyPercent` of 50%, the service scheduler may stop two existing tasks to free up cluster capacity before starting two new tasks.\n\nFor services that *do not* use a load balancer, the following should be noted:\n\n- A service is considered healthy if all essential containers within the tasks in the service pass their health checks.\n- If a task has no essential containers with a health check defined, the service scheduler will wait for 40 seconds after a task reaches a `RUNNING` state before the task is counted towards the minimum healthy percent total.\n- If a task has one or more essential containers with a health check defined, the service scheduler will wait for the task to reach a healthy status before counting it towards the minimum healthy percent total. A task is considered healthy when all essential containers within the task have passed their health checks. 
The amount of time the service scheduler can wait for is determined by the container health check settings.\n\nFor services that *do* use a load balancer, the following should be noted:\n\n- If a task has no essential containers with a health check defined, the service scheduler will wait for the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n- If a task has an essential container with a health check defined, the service scheduler will wait for both the task to reach a healthy status and the load balancer target group health check to return a healthy status before counting the task towards the minimum healthy percent total.\n\nThe default value for a replica service for `minimumHealthyPercent` is 100%. The default `minimumHealthyPercent` value for a service using the `DAEMON` service schedule is 0% for the AWS CLI , the AWS SDKs, and the APIs and 50% for the AWS Management Console.\n\nThe minimum number of healthy tasks during a deployment is the `desiredCount` multiplied by the `minimumHealthyPercent` /100, rounded up to the nearest integer value.\n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the EC2 launch type, the *minimum healthy percent* value is set to the default value. The *minimum healthy percent* value is used to define the lower limit on the number of the tasks in the service that remain in the `RUNNING` state while the container instances are in the `DRAINING` state.\n\n> You can't specify a custom `minimumHealthyPercent` value for a service that uses either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and has tasks that use the EC2 launch type. \n\nIf a service is using either the blue/green ( `CODE_DEPLOY` ) or `EXTERNAL` deployment types and is running tasks that use the Fargate launch type, the minimum healthy percent value is not used, although it is returned when describing your service.", "title": "MinimumHealthyPercent", "type": "number" } @@ -83883,7 +83875,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. 
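Putting numbers to the `maximumPercent` / `minimumHealthyPercent` descriptions above: with `DesiredCount: 4`, a maximum of 200% allows up to 8 tasks during a rolling update, and a minimum of 50% keeps at least 2 tasks running. A minimal sketch:

```yaml
# Inside an AWS::ECS::Service definition with DesiredCount: 4
DeploymentConfiguration:
  MaximumPercent: 200        # up to 4 * 200/100 = 8 tasks during a deployment
  MinimumHealthyPercent: 50  # at least ceil(4 * 50/100) = 2 tasks stay RUNNING
  DeploymentCircuitBreaker:
    Enable: true
    Rollback: true
```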
Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type. Optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. 
The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve a potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } @@ -84001,7 +83993,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::Service.LogConfiguration", - "markdownDescription": "The log configuration for the container. 
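A minimal sketch of the `awslogs` options enumerated above, inside a container definition's log configuration; the log group, Region, prefix, and buffer size are illustrative values only:

```yaml
# Inside a ContainerDefinition of an AWS::ECS::TaskDefinition
LogConfiguration:
  LogDriver: awslogs
  Options:
    awslogs-group: /ecs/my-service     # placeholder log group
    awslogs-region: us-east-1
    awslogs-stream-prefix: my-service  # required for the Fargate launch type
    awslogs-create-group: 'true'       # needs the logs:CreateLogGroup permission
    mode: non-blocking                 # buffer logs instead of blocking stdout/stderr
    max-buffer-size: 25m               # intermediate buffer used in non-blocking mode
```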
This parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [`docker run`](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/run/) .\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition. For more information about the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", + "markdownDescription": "The log configuration for the container. This parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run.\n\nBy default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver configuration in the container definition.\n\nUnderstand the following when specifying a log configuration for your containers.\n\n- Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon. 
Additional log drivers may be available in future releases of the Amazon ECS container agent.\n\nFor tasks on AWS Fargate , the supported log drivers are `awslogs` , `splunk` , and `awsfirelens` .\n\nFor tasks hosted on Amazon EC2 instances, the supported log drivers are `awslogs` , `fluentd` , `gelf` , `json-file` , `journald` , `syslog` , `splunk` , and `awsfirelens` .\n- This parameter requires version 1.18 of the Docker Remote API or greater on your container instance.\n- For tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must register the available logging drivers with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .\n- For tasks that are on AWS Fargate , because you don't have access to the underlying infrastructure your tasks are hosted on, any additional software needed must be installed outside of the task. For example, the Fluentd output aggregators or a remote host running Logstash to send Gelf logs to.", "title": "LogConfiguration" }, "Namespace": { @@ -84295,7 +84287,7 @@ "type": "array" }, "IpcMode": { - "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see [IPC settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#ipc-settings---ipc) in the *Docker run reference* .\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "The IPC resource namespace to use for the containers in the task. The valid values are `host` , `task` , or `none` . If `host` is specified, then all containers within the tasks that specified the `host` IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance. 
If `task` is specified, all containers within the specified task share the same IPC resources. If `none` is specified, then IPC resources within the containers of a task are private and not shared with other containers in a task or on the container instance. If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance.\n\nIf the `host` IPC mode is used, be aware that there is a heightened risk of undesired IPC namespace expose.\n\nIf you are setting namespaced kernel parameters using `systemControls` for the containers in the task, the following will apply to your IPC resource namespace. For more information, see [System Controls](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n- For tasks that use the `host` IPC mode, IPC namespace related `systemControls` are not supported.\n- For tasks that use the `task` IPC mode, IPC namespace related `systemControls` will apply to all containers within a task.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "IpcMode", "type": "string" }, @@ -84305,12 +84297,12 @@ "type": "string" }, "NetworkMode": { - "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a `NetworkConfiguration` value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.\n\nFor more information, see [Network settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#network-settings) in the *Docker run reference* .", + "markdownDescription": "The Docker networking mode to use for the containers in the task. The valid values are `none` , `bridge` , `awsvpc` , and `host` . 
If no network mode is specified, the default is `bridge` .\n\nFor Amazon ECS tasks on Fargate, the `awsvpc` network mode is required. For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances, `` or `awsvpc` can be used. If the network mode is set to `none` , you cannot specify port mappings in your container definitions, and the tasks containers do not have external connectivity. The `host` and `awsvpc` network modes offer the highest networking performance for containers because they use the EC2 network stack instead of the virtualized network stack provided by the `bridge` mode.\n\nWith the `host` and `awsvpc` network modes, exposed container ports are mapped directly to the corresponding host port (for the `host` network mode) or the attached elastic network interface port (for the `awsvpc` network mode), so you cannot take advantage of dynamic host port mappings.\n\n> When using the `host` network mode, you should not run containers using the root user (UID 0). It is considered best practice to use a non-root user. \n\nIf the network mode is `awsvpc` , the task is allocated an elastic network interface, and you must specify a [NetworkConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_NetworkConfiguration.html) value when you create a service or run a task with the task definition. For more information, see [Task Networking](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-networking.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIf the network mode is `host` , you cannot run multiple instantiations of the same task on a single container instance when port mappings are used.", "title": "NetworkMode", "type": "string" }, "PidMode": { - "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container. For more information, see [PID settings](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#pid-settings---pid) in the *Docker run reference* .\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure. For more information, see [Docker security](https://docs.aws.amazon.com/https://docs.docker.com/engine/security/security/) .\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", + "markdownDescription": "The process namespace to use for the containers in the task. The valid values are `host` or `task` . On Fargate for Linux containers, the only valid value is `task` . 
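As the `NetworkMode` description above notes, `awsvpc` tasks need a `NetworkConfiguration` when the service is created or the task is run. A minimal hypothetical Fargate task definition; the family, image, and sizes are placeholders:

```yaml
Resources:
  AppTaskDef:
    Type: AWS::ECS::TaskDefinition
    Properties:
      Family: app
      NetworkMode: awsvpc  # pair with a NetworkConfiguration on the service or RunTask call
      RequiresCompatibilities:
        - FARGATE
      Cpu: '256'
      Memory: '512'
      ContainerDefinitions:
        - Name: app
          Image: public.ecr.aws/docker/library/nginx:latest  # placeholder image
```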
For example, monitoring sidecars might need `pidMode` to access information about other containers running in the same task.\n\nIf `host` is specified, all containers within the tasks that specified the `host` PID mode on the same container instance share the same process namespace with the host Amazon EC2 instance.\n\nIf `task` is specified, all containers within the specified task share the same process namespace.\n\nIf no value is specified, the default is a private namespace for each container.\n\nIf the `host` PID mode is used, there's a heightened risk of undesired process namespace exposure.\n\n> This parameter is not supported for Windows containers. > This parameter is only supported for tasks that are hosted on AWS Fargate if the tasks are using platform version `1.4.0` or later (Linux). This isn't supported for Windows containers on Fargate.", "title": "PidMode", "type": "string" }, @@ -84349,7 +84341,7 @@ "type": "array" }, "TaskRoleArn": { - "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For informationabout the required IAM roles for Amazon ECS, see [IAM roles for Amazon ECS](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/security-ecs-iam-role-overview.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The short name or full Amazon Resource Name (ARN) of the AWS Identity and Access Management role that grants containers in the task permission to call AWS APIs on your behalf. For more information, see [Amazon ECS Task Role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nIAM roles for tasks on Windows require that the `-EnableTaskIAMRole` option is set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some configuration code to use the feature. For more information, see [Windows IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows_task_IAM_roles.html) in the *Amazon Elastic Container Service Developer Guide* .\n\n> String validation is done on the ECS side. If an invalid string value is given for `TaskRoleArn` , it may cause the CloudFormation job to hang.", "title": "TaskRoleArn", "type": "string" }, @@ -84407,12 +84399,12 @@ "items": { "type": "string" }, - "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `COMMAND` parameter to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#cmd](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) . If there are multiple arguments, each argument is a separated string in the array.", + "markdownDescription": "The command that's passed to the container. This parameter maps to `Cmd` in the docker container create command and the `COMMAND` parameter to docker run. 
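Given the new `TaskRoleArn` caveat above (an invalid string may hang the CloudFormation job), it is safest to derive the ARN from a role defined in the same template rather than hand-typing it. A minimal sketch with a hypothetical role:

```yaml
Resources:
  AppTaskRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: '2012-10-17'
        Statement:
          - Effect: Allow
            Principal:
              Service: ecs-tasks.amazonaws.com  # lets ECS tasks assume the role
            Action: sts:AssumeRole
  # ...then, in the task definition:
  #   TaskRoleArn: !GetAtt AppTaskRole.Arn  # always resolves to a well-formed ARN
```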
If there are multiple arguments, each argument is a separated string in the array.", "title": "Command", "type": "array" }, "Cpu": { - "markdownDescription": "The number of `cpu` units reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. For more information, see [CPU share constraint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#cpu-share-constraint) in the Docker documentation. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", + "markdownDescription": "The number of `cpu` units reserved for the container. 
This parameter maps to `CpuShares` in the docker container create command and the `--cpu-shares` option to docker run.\n\nThis field is optional for tasks using the Fargate launch type, and the only requirement is that the total amount of CPU reserved for all containers within a task be lower than the task-level `cpu` value.\n\n> You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024. \n\nLinux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.\n\nOn Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:\n\n- *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.\n- *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.\n- *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.\n\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as `0` , which Windows interprets as 1% of one CPU.", "title": "Cpu", "type": "number" }, @@ -84433,7 +84425,7 @@ "type": "number" }, "DisableNetworking": { - "markdownDescription": "When this parameter is true, networking is off within the container. 
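A worked example of the CPU-share behavior described above: two containers that each reserve 512 of a single core's 1,024 units are each guaranteed half the core under contention, and either can burst higher while the other is idle. Sketch (container names and images are placeholders):

```yaml
# Inside an AWS::ECS::TaskDefinition on a single-core EC2 container instance
ContainerDefinitions:
  - Name: web
    Image: public.ecr.aws/docker/library/nginx:latest
    Cpu: 512  # guaranteed 512 of 1,024 units; may float higher when spare capacity exists
  - Name: worker
    Image: public.ecr.aws/docker/library/busybox:latest
    Cpu: 512
```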
This parameter maps to `NetworkDisabled` in the docker container create command.\n\n> This parameter is not supported for Windows containers.", "title": "DisableNetworking", "type": "boolean" }, @@ -84441,7 +84433,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns-search` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS search domains that are presented to the container. This parameter maps to `DnsSearch` in the docker container create command and the `--dns-search` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsSearchDomains", "type": "array" }, @@ -84449,13 +84441,13 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--dns` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "A list of DNS servers that are presented to the container. This parameter maps to `Dns` in the docker container create command and the `--dns` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "DnsServers", "type": "array" }, "DockerLabels": { "additionalProperties": true, - "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--label` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "A key/value map of labels to add to the container. This parameter maps to `Labels` in the docker container create command and the `--label` option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84468,7 +84460,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. For more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--security-opt` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nFor more information about valid values, see [Docker Run Security Configuration](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", + "markdownDescription": "A list of strings to provide custom configuration for multiple security systems. This field isn't valid for containers in tasks using the Fargate launch type.\n\nFor Linux tasks on EC2, this parameter can be used to reference custom labels for SELinux and AppArmor multi-level security systems.\n\nFor any tasks on EC2, this parameter can be used to reference a credential spec file that configures a container for Active Directory authentication. 
For more information, see [Using gMSAs for Windows Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/windows-gmsa.html) and [Using gMSAs for Linux Containers](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/linux-gmsa.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThis parameter maps to `SecurityOpt` in the docker container create command and the `--security-opt` option to docker run.\n\n> The Amazon ECS container agent running on a container instance must register with the `ECS_SELINUX_CAPABLE=true` or `ECS_APPARMOR_CAPABLE=true` environment variables before containers placed on that instance can use these security options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* . \n\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" | \"credentialspec:CredentialSpecFilePath\"", "title": "DockerSecurityOptions", "type": "array" }, @@ -84476,7 +84468,7 @@ "items": { "type": "string" }, - "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--entrypoint` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For more information, see [https://docs.docker.com/engine/reference/builder/#entrypoint](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) .", + "markdownDescription": "> Early versions of the Amazon ECS container agent don't properly handle `entryPoint` parameters. If you have problems using `entryPoint` , update your container agent or enter your commands and arguments as `command` array items instead. \n\nThe entry point that's passed to the container. This parameter maps to `Entrypoint` in the docker container create command and the `--entrypoint` option to docker run.", "title": "EntryPoint", "type": "array" }, @@ -84484,7 +84476,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.KeyValuePair" }, - "markdownDescription": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", + "markdownDescription": "The environment variables to pass to a container. 
This parameter maps to `Env` in the docker container create command and the `--env` option to docker run.\n\n> We don't recommend that you use plaintext environment variables for sensitive information, such as credential data.", "title": "Environment", "type": "array" }, @@ -84492,7 +84484,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.EnvironmentFile" }, - "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored. For more information about the environment variable file syntax, see [Declare default environment variables in file](https://docs.aws.amazon.com/https://docs.docker.com/compose/env-file/) .\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "A list of files containing the environment variables to pass to a container. This parameter maps to the `--env-file` option to docker run.\n\nYou can specify up to ten environment files. The file must have a `.env` file extension. Each line in an environment file contains an environment variable in `VARIABLE=VALUE` format. Lines beginning with `#` are treated as comments and are ignored.\n\nIf there are environment variables specified using the `environment` parameter in a container definition, they take precedence over the variables contained within an environment file. If multiple environment files are specified that contain the same variable, they're processed from the top down. We recommend that you use unique variable names. For more information, see [Specifying Environment Variables](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "EnvironmentFiles", "type": "array" }, @@ -84505,7 +84497,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HostEntry" }, - "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. This parameter maps to `ExtraHosts` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--add-host` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", + "markdownDescription": "A list of hostnames and IP address mappings to append to the `/etc/hosts` file on the container. 
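Taken together, the `Environment` and `EnvironmentFiles` descriptions above suggest a container definition fragment like the following minimal sketch, written as plain Python data using the schema's PascalCase keys (the container name, image, and S3 ARN are hypothetical, not taken from the schema):

```python
# Sketch of Environment and EnvironmentFiles as described above
# (hypothetical names and ARN).
container_definition = {
    "Name": "app",
    "Image": "public.ecr.aws/docker/library/nginx:latest",
    # Plain name/value pairs; these take precedence over values from files.
    "Environment": [{"Name": "STAGE", "Value": "prod"}],
    # Up to ten files, each with a .env extension; VARIABLE=VALUE per line,
    # '#' starts a comment; files are processed top-down on conflicts.
    "EnvironmentFiles": [
        {"Type": "s3", "Value": "arn:aws:s3:::my-config-bucket/app.env"},
    ],
}
```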
This parameter maps to `ExtraHosts` in the docker container create command and the `--add-host` option to docker run.\n\n> This parameter isn't supported for Windows containers or tasks that use the `awsvpc` network mode.", "title": "ExtraHosts", "type": "array" }, @@ -84516,21 +84508,21 @@ }, "HealthCheck": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.HealthCheck", - "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `HEALTHCHECK` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The container health check command and associated configuration parameters for the container. This parameter maps to `HealthCheck` in the docker container create command and the `HEALTHCHECK` parameter of docker run.", "title": "HealthCheck" }, "Hostname": { - "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--hostname` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", + "markdownDescription": "The hostname to use for your container. This parameter maps to `Hostname` in the docker container create command and the `--hostname` option to docker run.\n\n> The `hostname` parameter is not supported if you're using the `awsvpc` network mode.", "title": "Hostname", "type": "string" }, "Image": { - "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . 
For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", + "markdownDescription": "The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either `*repository-url* / *image* : *tag*` or `*repository-url* / *image* @ *digest*` . Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to `Image` in the docker container create command and the `IMAGE` parameter of docker run.\n\n- When a new task starts, the Amazon ECS container agent pulls the latest version of the specified image and tag for the container to use. However, subsequent updates to a repository image aren't propagated to already running tasks.\n- Images in Amazon ECR repositories can be specified by either using the full `registry/repository:tag` or `registry/repository@digest` . For example, `012345678910.dkr.ecr..amazonaws.com/:latest` or `012345678910.dkr.ecr..amazonaws.com/@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE` .\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", "title": "Image", "type": "string" }, "Interactive": { - "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--interactive` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , you can deploy containerized applications that require `stdin` or a `tty` to be allocated. This parameter maps to `OpenStdin` in the docker container create command and the `--interactive` option to docker run.", "title": "Interactive", "type": "boolean" }, @@ -84538,7 +84530,7 @@ "items": { "type": "string" }, - "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to [Legacy container links](https://docs.aws.amazon.com/https://docs.docker.com/network/links/) in the Docker documentation. 
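For illustration, the `Image` forms listed above might look like the following sketch (region and repository names are hypothetical; whichever form is used, the string is passed straight through to the Docker daemon):

```python
# Hypothetical image references, one per form described above.
images = {
    "ecr_tag": "012345678910.dkr.ecr.us-east-1.amazonaws.com/my-repo:latest",  # registry/repository:tag
    "ecr_digest": "012345678910.dkr.ecr.us-east-1.amazonaws.com/my-repo@sha256:94afd1f2e64d908bc90dbca0035a5b567EXAMPLE",  # registry/repository@digest
    "official": "ubuntu",                             # official Docker Hub repository, single name
    "org": "amazon/amazon-ecs-agent",                 # Docker Hub, qualified by organization
    "other_registry": "quay.io/assemblyline/ubuntu",  # other registry, qualified by domain
}
```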
This parameter maps to `Links` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--link` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", + "markdownDescription": "The `links` parameter allows containers to communicate with each other without the need for port mappings. This parameter is only supported if the network mode of a task definition is `bridge` . The `name:internalName` construct is analogous to `name:alias` in Docker links. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `Links` in the docker container create command and the `--link` option to docker run.\n\n> This parameter is not supported for Windows containers. > Containers that are collocated on a single container instance may be able to communicate with each other without requiring links or host port mappings. Network isolation is achieved on the container instance using security groups and VPC settings.", "title": "Links", "type": "array" }, @@ -84549,7 +84541,7 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the docker container create command and the `--log-driver` option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container may use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html) data type). Additional log drivers may be available in future releases of the Amazon ECS container agent. \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS Container Agent Configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { @@ -84558,7 +84550,7 @@ "type": "number" }, "MemoryReservation": { - "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. 
This parameter maps to `MemoryReservation` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--memory-reservation` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. So, don't specify less than 4 MiB of memory for your containers.", + "markdownDescription": "The soft limit (in MiB) of memory to reserve for the container. When system memory is under heavy contention, Docker attempts to keep the container memory to this soft limit. However, your container can consume more memory when it needs to, up to either the hard limit specified with the `memory` parameter (if applicable), or all of the available memory on the container instance, whichever comes first. This parameter maps to `MemoryReservation` in the docker container create command and the `--memory-reservation` option to docker run.\n\nIf a task-level memory value is not specified, you must specify a non-zero integer for one or both of `memory` or `memoryReservation` in a container definition. If you specify both, `memory` must be greater than `memoryReservation` . If you specify `memoryReservation` , then that value is subtracted from the available memory resources for the container instance where the container is placed. Otherwise, the value of `memory` is used.\n\nFor example, if your container normally uses 128 MiB of memory, but occasionally bursts to 256 MiB of memory for short periods of time, you can set a `memoryReservation` of 128 MiB, and a `memory` hard limit of 300 MiB. This configuration would allow the container to only reserve 128 MiB of memory from the remaining resources on the container instance, but also allow the container to consume more memory resources when needed.\n\nThe Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a container. So, don't specify less than 6 MiB of memory for your containers.\n\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a container. 
So, don't specify less than 4 MiB of memory for your containers.", "title": "MemoryReservation", "type": "number" }, @@ -84566,12 +84558,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.MountPoint" }, - "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", + "markdownDescription": "The mount points for data volumes in your container.\n\nThis parameter maps to `Volumes` in the docker container create command and the `--volume` option to docker run.\n\nWindows containers can mount whole directories on the same drive as `$env:ProgramData` . Windows containers can't mount directories on a different drive, and mount point can't be across drives.", "title": "MountPoints", "type": "array" }, "Name": { - "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--name` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The name of a container. If you're linking multiple containers together in a task definition, the `name` of one container can be entered in the `links` of another container to connect the containers. Up to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. This parameter maps to `name` in the docker container create command and the `--name` option to docker run.", "title": "Name", "type": "string" }, @@ -84584,17 +84576,17 @@ "type": "array" }, "Privileged": { - "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", + "markdownDescription": "When this parameter is true, the container is given elevated privileges on the host container instance (similar to the `root` user). 
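The 128 MiB / 300 MiB worked example in the `MemoryReservation` text above can be written down directly; a minimal sketch (the container name and image are hypothetical, the numbers come from the description):

```python
# MemoryReservation is the soft limit (--memory-reservation); Memory is the
# hard limit. 128 MiB is reserved on the instance, and bursts may reach 300 MiB.
container_definition = {
    "Name": "bursty-app",
    "Image": "public.ecr.aws/docker/library/redis:latest",
    "MemoryReservation": 128,  # soft limit in MiB; subtracted from instance capacity
    "Memory": 300,             # hard limit in MiB; must be greater than MemoryReservation
}
```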
This parameter maps to `Privileged` in the docker container create command and the `--privileged` option to docker run.\n\n> This parameter is not supported for Windows containers or tasks run on AWS Fargate .", "title": "Privileged", "type": "boolean" }, "PseudoTerminal": { - "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--tty` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "When this parameter is `true` , a TTY is allocated. This parameter maps to `Tty` in the docker container create command and the `--tty` option to docker run.", "title": "PseudoTerminal", "type": "boolean" }, "ReadonlyRootFilesystem": { - "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--read-only` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the docker container create command and the `--read-only` option to docker run.\n\n> This parameter is not supported for Windows containers.", "title": "ReadonlyRootFilesystem", "type": "boolean" }, @@ -84625,7 +84617,7 @@ "type": "number" }, "StopTimeout": { - "markdownDescription": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nThe max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter or the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable are set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. 
If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values are 2-120 seconds.", + "markdownDescription": "Time duration (in seconds) to wait before the container is forcefully killed if it doesn't exit normally on its own.\n\nFor tasks using the Fargate launch type, the task or service requires the following platforms:\n\n- Linux platform version `1.3.0` or later.\n- Windows platform version `1.0.0` or later.\n\nFor tasks that use the Fargate launch type, the max stop timeout value is 120 seconds and if the parameter is not specified, the default value of 30 seconds is used.\n\nFor tasks that use the EC2 launch type, if the `stopTimeout` parameter isn't specified, the value set for the Amazon ECS container agent configuration variable `ECS_CONTAINER_STOP_TIMEOUT` is used. If neither the `stopTimeout` parameter nor the `ECS_CONTAINER_STOP_TIMEOUT` agent configuration variable is set, then the default values of 30 seconds for Linux containers and 30 seconds on Windows containers are used. Your container instances require at least version 1.26.0 of the container agent to use a container stop timeout value. However, we recommend using the latest container agent version. For information about checking your agent version and updating to the latest version, see [Updating the Amazon ECS Container Agent](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) in the *Amazon Elastic Container Service Developer Guide* . If you're using an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the `ecs-init` package. If your container instances are launched from version `20190301` or later, then they contain the required versions of the container agent and `ecs-init` . For more information, see [Amazon ECS-optimized Linux AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) in the *Amazon Elastic Container Service Developer Guide* .\n\nThe valid values for Fargate are 2-120 seconds.", "title": "StopTimeout", "type": "number" }, @@ -84633,7 +84625,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.SystemControl" }, - "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--sysctl` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . For example, you can configure `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", + "markdownDescription": "A list of namespaced kernel parameters to set in the container. This parameter maps to `Sysctls` in the docker container create command and the `--sysctl` option to docker run. For example, you can configure the `net.ipv4.tcp_keepalive_time` setting to maintain longer lived connections.", "title": "SystemControls", "type": "array" }, @@ -84646,7 +84638,7 @@ "type": "array" }, "User": { - "markdownDescription": "The user to use inside the container. 
This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", + "markdownDescription": "The user to use inside the container. This parameter maps to `User` in the docker container create command and the `--user` option to docker run.\n\n> When running tasks using the `host` network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security. \n\nYou can specify the `user` using the following formats. If specifying a UID or GID, you must specify it as a positive integer.\n\n- `user`\n- `user:group`\n- `uid`\n- `uid:gid`\n- `user:gid`\n- `uid:group`\n\n> This parameter is not supported for Windows containers.", "title": "User", "type": "string" }, @@ -84654,12 +84646,12 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.VolumeFrom" }, - "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--volumes-from` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "Data volumes to mount from another container. This parameter maps to `VolumesFrom` in the docker container create command and the `--volumes-from` option to docker run.", "title": "VolumesFrom", "type": "array" }, "WorkingDirectory": { - "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--workdir` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .", + "markdownDescription": "The working directory to run commands inside the container in. This parameter maps to `WorkingDir` in the docker container create command and the `--workdir` option to docker run.", "title": "WorkingDirectory", "type": "string" } @@ -84719,13 +84711,13 @@ "type": "boolean" }, "Driver": { - "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. 
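The accepted `User` formats enumerated above, as a quick sketch (user and group names are hypothetical; UIDs and GIDs must be positive integers):

```python
# One entry per User format listed in the description above.
user_formats = [
    "appuser",           # user
    "appuser:appgroup",  # user:group
    "1000",              # uid
    "1000:1000",         # uid:gid
    "appuser:1000",      # user:gid
    "1000:appgroup",     # uid:group
]

# Per the note above, avoid root (UID 0) with host network mode:
container_definition = {
    "Name": "app",
    "Image": "public.ecr.aws/docker/library/httpd:latest",
    "User": "1000:1000",  # run as a non-root uid:gid
}
```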
For more information, see [Docker plugin discovery](https://docs.aws.amazon.com/https://docs.docker.com/engine/extend/plugin_api/#plugin-discovery) . This parameter maps to `Driver` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxdriver` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "The Docker volume driver to use. The driver value must match the driver name provided by Docker because it is used for task placement. If the driver was installed using the Docker plugin CLI, use `docker plugin ls` to retrieve the driver name from your container instance. If the driver was installed using another method, use Docker plugin discovery to retrieve the driver name. This parameter maps to `Driver` in the docker volume create command and the `--driver` option to docker volume create.", "title": "Driver", "type": "string" }, "DriverOpts": { "additionalProperties": true, - "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxopt` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "A map of Docker driver-specific options passed through. This parameter maps to `DriverOpts` in the docker volume create command and the `--opt` option to docker volume create.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84736,7 +84728,7 @@ }, "Labels": { "additionalProperties": true, - "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the [Create a volume](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/VolumeCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `xxlabel` option to [docker volume create](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/commandline/volume_create/) .", + "markdownDescription": "Custom metadata to add to your Docker volume. This parameter maps to `Labels` in the docker volume create command and the `--label` option to docker volume create.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -84818,12 +84810,12 @@ "additionalProperties": false, "properties": { "CredentialsParameter": { - "markdownDescription": "", + "markdownDescription": "The authorization credential option to use. The authorization credential options can be provided using either the Amazon Resource Name (ARN) of an AWS Secrets Manager secret or an SSM Parameter Store parameter. 
The ARN refers to the stored credentials.", "title": "CredentialsParameter", "type": "string" }, "Domain": { - "markdownDescription": "", + "markdownDescription": "A fully qualified domain name hosted by an [AWS Directory Service](https://docs.aws.amazon.com/directoryservice/latest/admin-guide/directory_microsoft_ad.html) Managed Microsoft AD (Active Directory) or self-hosted AD on Amazon EC2.", "title": "Domain", "type": "string" } @@ -84888,7 +84880,7 @@ "items": { "type": "string" }, - "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and non-zero exit code indicates failure. For more information, see `HealthCheck` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) .", + "markdownDescription": "A string array representing the command that the container runs to determine if it is healthy. The string array must start with `CMD` to run the command arguments directly, or `CMD-SHELL` to run the command with the container's default shell.\n\nWhen you use the AWS Management Console JSON panel, the AWS Command Line Interface , or the APIs, enclose the list of commands in double quotes and brackets.\n\n`[ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]`\n\nYou don't include the double quotes and brackets when you use the AWS Management Console.\n\n`CMD-SHELL, curl -f http://localhost/ || exit 1`\n\nAn exit code of 0 indicates success, and a non-zero exit code indicates failure. For more information, see `HealthCheck` in the docker container create command.", "title": "Command", "type": "array" }, @@ -84965,7 +84957,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-add` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. 
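The `CMD-SHELL` form quoted in the health check `Command` description above slots into a `HealthCheck` object like this sketch (the probe command is the one from the text; the timing values are hypothetical, not schema defaults):

```python
# HealthCheck fragment: exit code 0 means healthy, non-zero means unhealthy.
health_check = {
    "Command": ["CMD-SHELL", "curl -f http://localhost/ || exit 1"],
    "Interval": 30,     # seconds between probes (hypothetical value)
    "Timeout": 5,       # seconds allowed per probe (hypothetical value)
    "Retries": 3,       # consecutive failures before marking the container unhealthy
    "StartPeriod": 10,  # grace period after container start (hypothetical value)
}
```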
\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "markdownDescription": "The Linux capabilities for the container that have been added to the default configuration provided by Docker. This parameter maps to `CapAdd` in the docker container create command and the `--cap-add` option to docker run.\n\n> Tasks launched on AWS Fargate only support adding the `SYS_PTRACE` kernel capability. \n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", "title": "Add", "type": "array" }, @@ -84973,7 +84965,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. This parameter maps to `CapDrop` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--cap-drop` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", + "markdownDescription": "The Linux capabilities for the container that have been removed from the default configuration provided by Docker. 
This parameter maps to `CapDrop` in the docker container create command and the `--cap-drop` option to docker run.\n\nValid values: `\"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" | \"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" | \"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" | \"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\" | \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" | \"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" | \"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" | \"WAKE_ALARM\"`", "title": "Drop", "type": "array" } @@ -85008,27 +85000,27 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.Device" }, - "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.35/) and the `--device` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", + "markdownDescription": "Any host devices to expose to the container. This parameter maps to `Devices` in the docker container create command and the `--device` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `devices` parameter isn't supported.", "title": "Devices", "type": "array" }, "InitProcessEnabled": { - "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) . This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "Run an `init` process inside the container that forwards signals and reaps processes. This parameter maps to the `--init` option to docker run. This parameter requires version 1.25 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "title": "InitProcessEnabled", "type": "boolean" }, "MaxSwap": { - "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. 
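A sketch of the `Add`/`Drop` lists just described, nested under the task definition's `LinuxParameters` (the dropped capabilities are hypothetical hardening choices taken from the valid-values list; on Fargate only `SYS_PTRACE` can be added):

```python
# KernelCapabilities fragment: Add maps to --cap-add, Drop maps to --cap-drop.
linux_parameters = {
    "Capabilities": {
        "Add": ["SYS_PTRACE"],         # the one capability Fargate tasks may add
        "Drop": ["NET_RAW", "MKNOD"],  # hypothetical choices from the valid values above
    },
}
```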
A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "markdownDescription": "The total amount of swap memory (in MiB) a container can use. This parameter will be translated to the `--memory-swap` option to docker run where the value would be the sum of the container memory plus the `maxSwap` value.\n\nIf a `maxSwap` value of `0` is specified, the container will not use swap. Accepted values are `0` or any positive integer. If the `maxSwap` parameter is omitted, the container will use the swap configuration for the container instance it is running on. A `maxSwap` value must be set for the `swappiness` parameter to be used.\n\n> If you're using tasks that use the Fargate launch type, the `maxSwap` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", "title": "MaxSwap", "type": "number" }, "SharedMemorySize": { - "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", + "markdownDescription": "The value for the size (in MiB) of the `/dev/shm` volume. This parameter maps to the `--shm-size` option to docker run.\n\n> If you are using tasks that use the Fargate launch type, the `sharedMemorySize` parameter is not supported.", "title": "SharedMemorySize", "type": "number" }, "Swappiness": { - "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. This parameter maps to the `--memory-swappiness` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", + "markdownDescription": "This allows you to tune a container's memory swappiness behavior. A `swappiness` value of `0` will cause swapping to not happen unless absolutely necessary. A `swappiness` value of `100` will cause pages to be swapped very aggressively. Accepted values are whole numbers between `0` and `100` . If the `swappiness` parameter is not specified, a default value of `60` is used. If a value is not specified for `maxSwap` then this parameter is ignored. 
This parameter maps to the `--memory-swappiness` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `swappiness` parameter isn't supported.\n> \n> If you're using tasks on Amazon Linux 2023 the `swappiness` parameter isn't supported.", "title": "Swappiness", "type": "number" }, @@ -85036,7 +85028,7 @@ "items": { "$ref": "#/definitions/AWS::ECS::TaskDefinition.Tmpfs" }, - "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/#security-configuration) .\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.", + "markdownDescription": "The container path, mount options, and size (in MiB) of the tmpfs mount. This parameter maps to the `--tmpfs` option to docker run.\n\n> If you're using tasks that use the Fargate launch type, the `tmpfs` parameter isn't supported.", "title": "Tmpfs", "type": "array" } @@ -85053,7 +85045,7 @@ }, "Options": { "additionalProperties": true, - "markdownDescription": "The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", + "markdownDescription": "The configuration options to send to the log driver.\n\nThe options you can specify depend on the log driver. Some of the options you can specify when you use the `awslogs` log driver to route logs to Amazon CloudWatch include the following:\n\n- **awslogs-create-group** - Required: No\n\nSpecify whether you want the log group to be created automatically. If this option isn't specified, it defaults to `false` .\n\n> Your IAM policy must include the `logs:CreateLogGroup` permission before you attempt to use `awslogs-create-group` .\n- **awslogs-region** - Required: Yes\n\nSpecify the AWS Region that the `awslogs` log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option.\n- **awslogs-group** - Required: Yes\n\nMake sure to specify a log group that the `awslogs` log driver sends its log streams to.\n- **awslogs-stream-prefix** - Required: Yes, when using the Fargate launch type; optional for the EC2 launch type.\n\nUse the `awslogs-stream-prefix` option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format `prefix-name/container-name/ecs-task-id` .\n\nIf you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. 
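The `MaxSwap`/`Swappiness` interplay described above (swappiness is ignored unless `MaxSwap` is set, and neither applies to Fargate or Amazon Linux 2023) in one sketch, with hypothetical values:

```python
# LinuxParameters fragment: --memory-swap becomes container memory + MaxSwap.
linux_parameters = {
    "MaxSwap": 512,    # MiB of swap the container may use; 0 disables swap entirely
    "Swappiness": 60,  # 0 = swap only when necessary, 100 = swap aggressively; 60 is the default
}
```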
Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option.\n\nFor Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to.\n\nYou must specify a stream-prefix for your logs to appear in the Log pane when using the Amazon ECS console.\n- **awslogs-datetime-format** - Required: No\n\nThis option defines a multiline start pattern in Python `strftime` format. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nOne example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry.\n\nFor more information, see [awslogs-datetime-format](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-datetime-format) .\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **awslogs-multiline-pattern** - Required: No\n\nThis option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don\u2019t match the pattern. The matched line is the delimiter between log messages.\n\nFor more information, see [awslogs-multiline-pattern](https://docs.aws.amazon.com/https://docs.docker.com/config/containers/logging/awslogs/#awslogs-multiline-pattern) .\n\nThis option is ignored if `awslogs-datetime-format` is also configured.\n\nYou cannot configure both the `awslogs-datetime-format` and `awslogs-multiline-pattern` options.\n\n> Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.\n- **mode** - Required: No\n\nValid values: `non-blocking` | `blocking`\n\nThis option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted.\n\nIf you use the `blocking` mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the `stdout` and `stderr` streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure.\n\nIf you use the `non-blocking` mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the `max-buffer-size` option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. 
For more information, see [Preventing log loss with non-blocking mode in the `awslogs` container log driver](https://docs.aws.amazon.com/containers/preventing-log-loss-with-non-blocking-mode-in-the-awslogs-container-log-driver/) .\n- **max-buffer-size** - Required: No\n\nDefault value: `1m`\n\nWhen `non-blocking` mode is used, the `max-buffer-size` log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.\n\nTo route logs using the `splunk` log router, you need to specify a `splunk-token` and a `splunk-url` .\n\nWhen you use the `awsfirelens` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the `log-driver-buffer-limit` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issues because high throughput might result in memory running out for the buffer inside of Docker.\n\nOther options you can specify when using `awsfirelens` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with `region` and a name for the log stream with `delivery_stream` .\n\nWhen you export logs to Amazon Kinesis Data Streams, you can specify an AWS Region with `region` and a data stream name with `stream` .\n\nWhen you export logs to Amazon OpenSearch Service, you can specify options like `Name` , `Host` (OpenSearch Service endpoint without protocol), `Port` , `Index` , `Type` , `Aws_auth` , `Aws_region` , `Suppress_Type_Name` , and `tls` .\n\nWhen you export logs to Amazon S3, you can specify the bucket using the `bucket` option. You can also specify `region` , `total_file_size` , `upload_timeout` , and `use_put_object` as options.\n\nThis parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version --format '{{.Server.APIVersion}}'`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -85293,7 +85285,7 @@ "additionalProperties": false, "properties": { "HardLimit": { - "markdownDescription": "The hard limit for the `ulimit` type.", + "markdownDescription": "The hard limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` .", "title": "HardLimit", "type": "number" }, @@ -85303,7 +85295,7 @@ "type": "string" }, "SoftLimit": { - "markdownDescription": "The soft limit for the `ulimit` type.", + "markdownDescription": "The soft limit for the `ulimit` type. The value can be specified in bytes, seconds, or as a count, depending on the `type` of the `ulimit` .", "title": "SoftLimit", "type": "number" } @@ -85507,7 +85499,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the security groups associated with the task or service. If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `AwsVpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", + "markdownDescription": "The IDs of the security groups associated with the task or service. 
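Pulling the `awslogs` options described above together, one plausible CloudWatch Logs configuration looks like this sketch (the log group, Region, and prefix are hypothetical):

```python
# LogConfiguration fragment routing container logs to CloudWatch Logs.
log_configuration = {
    "LogDriver": "awslogs",
    "Options": {
        "awslogs-group": "/ecs/my-service",     # hypothetical group name
        "awslogs-create-group": "true",         # requires logs:CreateLogGroup in IAM
        "awslogs-region": "us-east-1",          # hypothetical Region
        "awslogs-stream-prefix": "my-service",  # streams become prefix/container-name/task-id
        "mode": "non-blocking",                 # buffer rather than block stdout/stderr writes
        "max-buffer-size": "25m",               # in-memory buffer for non-blocking mode (default 1m)
    },
}
```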
If you don't specify a security group, the default security group for the VPC is used. There's a limit of 5 security groups that can be specified per `awsvpcConfiguration` .\n\n> All specified security groups must be from the same VPC.", "title": "SecurityGroups", "type": "array" }, @@ -85515,7 +85507,7 @@ "items": { "type": "string" }, - "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `AwsVpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", + "markdownDescription": "The IDs of the subnets associated with the task or service. There's a limit of 16 subnets that can be specified per `awsvpcConfiguration` .\n\n> All specified subnets must be from the same VPC.", "title": "Subnets", "type": "array" } @@ -87874,12 +87866,12 @@ "properties": { "OnDemandSpecification": { "$ref": "#/definitions/AWS::EMR::Cluster.OnDemandProvisioningSpecification", - "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", "title": "OnDemandSpecification" }, "SpotSpecification": { "$ref": "#/definitions/AWS::EMR::Cluster.SpotProvisioningSpecification", - "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.", + "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior.", "title": "SpotSpecification" } }, @@ -88624,12 +88616,12 @@ "properties": { "OnDemandSpecification": { "$ref": "#/definitions/AWS::EMR::InstanceFleetConfig.OnDemandProvisioningSpecification", - "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", + "markdownDescription": "The launch specification for On-Demand Instances in the instance fleet, which determines the allocation strategy and capacity reservation options.\n\n> The instance fleet configuration is available only in Amazon EMR releases 4.8.0 and later, excluding 5.0.x versions. 
On-Demand Instances allocation strategy is available in Amazon EMR releases 5.12.1 and later.", "title": "OnDemandSpecification" }, "SpotSpecification": { "$ref": "#/definitions/AWS::EMR::InstanceFleetConfig.SpotProvisioningSpecification", - "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the defined duration, provisioning timeout behavior, and allocation strategy.", + "markdownDescription": "The launch specification for Spot instances in the fleet, which determines the allocation strategy, defined duration, and provisioning timeout behavior.", "title": "SpotSpecification" } }, @@ -90340,12 +90332,12 @@ "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", + "markdownDescription": "If you are running Redis OSS engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign. This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "CacheNodeType": { - "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis configuration variables `appendonly` and `appendfsync` are not supported on Redis version 2.8.22 and later.", + "markdownDescription": "The compute and memory capacity of the nodes in the node group (shard).\n\nThe following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. Changing the CacheNodeType of a Memcached instance is currently not supported. If you need to scale using Memcached, we recommend forcing a replacement update by changing the `LogicalResourceId` of the resource.\n\n- General purpose:\n\n- Current generation:\n\n*M6g node types:* `cache.m6g.large` , `cache.m6g.xlarge` , `cache.m6g.2xlarge` , `cache.m6g.4xlarge` , `cache.m6g.8xlarge` , `cache.m6g.12xlarge` , `cache.m6g.16xlarge` , `cache.m6g.24xlarge`\n\n*M5 node types:* `cache.m5.large` , `cache.m5.xlarge` , `cache.m5.2xlarge` , `cache.m5.4xlarge` , `cache.m5.12xlarge` , `cache.m5.24xlarge`\n\n*M4 node types:* `cache.m4.large` , `cache.m4.xlarge` , `cache.m4.2xlarge` , `cache.m4.4xlarge` , `cache.m4.10xlarge`\n\n*T4g node types:* `cache.t4g.micro` , `cache.t4g.small` , `cache.t4g.medium`\n\n*T3 node types:* `cache.t3.micro` , `cache.t3.small` , `cache.t3.medium`\n\n*T2 node types:* `cache.t2.micro` , `cache.t2.small` , `cache.t2.medium`\n- Previous generation: (not recommended)\n\n*T1 node types:* `cache.t1.micro`\n\n*M1 node types:* `cache.m1.small` , `cache.m1.medium` , `cache.m1.large` , `cache.m1.xlarge`\n\n*M3 node types:* `cache.m3.medium` , `cache.m3.large` , `cache.m3.xlarge` , `cache.m3.2xlarge`\n- Compute optimized:\n\n- Previous generation: (not recommended)\n\n*C1 node types:* `cache.c1.xlarge`\n- Memory optimized:\n\n- Current generation:\n\n*R6gd node types:* `cache.r6gd.xlarge` , `cache.r6gd.2xlarge` , `cache.r6gd.4xlarge` , `cache.r6gd.8xlarge` , `cache.r6gd.12xlarge` , `cache.r6gd.16xlarge`\n\n> The `r6gd` family is available in the following regions: `us-east-2` , `us-east-1` , `us-west-2` , `us-west-1` , `eu-west-1` , `eu-central-1` , `ap-northeast-1` , `ap-southeast-1` , `ap-southeast-2` . 
\n\n*R6g node types:* `cache.r6g.large` , `cache.r6g.xlarge` , `cache.r6g.2xlarge` , `cache.r6g.4xlarge` , `cache.r6g.8xlarge` , `cache.r6g.12xlarge` , `cache.r6g.16xlarge` , `cache.r6g.24xlarge`\n\n*R5 node types:* `cache.r5.large` , `cache.r5.xlarge` , `cache.r5.2xlarge` , `cache.r5.4xlarge` , `cache.r5.12xlarge` , `cache.r5.24xlarge`\n\n*R4 node types:* `cache.r4.large` , `cache.r4.xlarge` , `cache.r4.2xlarge` , `cache.r4.4xlarge` , `cache.r4.8xlarge` , `cache.r4.16xlarge`\n- Previous generation: (not recommended)\n\n*M2 node types:* `cache.m2.xlarge` , `cache.m2.2xlarge` , `cache.m2.4xlarge`\n\n*R3 node types:* `cache.r3.large` , `cache.r3.xlarge` , `cache.r3.2xlarge` , `cache.r3.4xlarge` , `cache.r3.8xlarge`\n\nFor region availability, see [Supported Node Types by Region](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/CacheNodes.SupportedTypes.html#CacheNodes.SupportedTypesByRegion)\n\n*Additional node type info*\n\n- All current generation instance types are created in Amazon VPC by default.\n- Redis OSS append-only files (AOF) are not supported for T1 or T2 instances.\n- Redis OSS Multi-AZ with automatic failover is not supported on T1 instances.\n- Redis OSS configuration variables `appendonly` and `appendfsync` are not supported on Redis OSS version 2.8.22 and later.", "title": "CacheNodeType", "type": "string" }, @@ -90383,7 +90375,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when modifying a cluster, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90396,7 +90388,7 @@ "type": "array" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90437,12 +90429,12 @@ "items": { "type": "string" }, - "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). 
The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, "SnapshotName": { - "markdownDescription": "The name of a Redis snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", + "markdownDescription": "The name of a Redis OSS snapshot from which to restore data into the new node group (shard). The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "title": "SnapshotName", "type": "string" }, @@ -90618,7 +90610,7 @@ "additionalProperties": false, "properties": { "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90633,7 +90625,7 @@ "type": "string" }, "EngineVersion": { - "markdownDescription": "The Elasticache Redis engine version.", + "markdownDescription": "The ElastiCache Redis OSS engine version.", "title": "EngineVersion", "type": "string" }, @@ -90744,7 +90736,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -90887,22 +90879,22 @@ "additionalProperties": false, "properties": { "AtRestEncryptionEnabled": { - "markdownDescription": "A flag that enables encryption at rest when set to `true` .\n\nYou cannot modify the value of `AtRestEncryptionEnabled` after the replication group is created.
To enable encryption at rest on a replication group you must set `AtRestEncryptionEnabled` to `true` when you create the replication group.\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`", "title": "AtRestEncryptionEnabled", "type": "boolean" }, "AuthToken": { - "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](https://docs.aws.amazon.com/http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "*Reserved parameter.* The password used to access a password protected server.\n\n`AuthToken` can be specified only on replication groups where `TransitEncryptionEnabled` is `true` . For more information, see [Authenticating Users with the Redis OSS AUTH Command](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html) .\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` . \n\nPassword constraints:\n\n- Must be only printable ASCII characters.\n- Must be at least 16 characters and no more than 128 characters in length.\n- Nonalphanumeric characters are restricted to (!, &, #, $, ^, <, >, -, ).\n\nFor more information, see [AUTH password](http://redis.io/commands/AUTH) at http://redis.io/commands/AUTH.\n\n> If ADDING the AuthToken, update requires [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "AuthToken", "type": "string" }, "AutoMinorVersionUpgrade": { - "markdownDescription": "If you are running Redis engine version 6.0 or later, set this parameter to yes if you want to opt-in to the next minor version upgrade campaign.
This parameter is disabled for previous versions.", "title": "AutoMinorVersionUpgrade", "type": "boolean" }, "AutomaticFailoverEnabled": { - "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis (cluster mode enabled) replication groups.\n\nDefault: false", + "markdownDescription": "Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails.\n\n`AutomaticFailoverEnabled` must be enabled for Redis OSS (cluster mode enabled) replication groups.\n\nDefault: false", "title": "AutomaticFailoverEnabled", "type": "boolean" }, @@ -90912,7 +90904,7 @@ "type": "string" }, "CacheParameterGroupName": { - "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", + "markdownDescription": "The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.\n\nIf you are running Redis OSS version 3.2.4 or later, only one node group (shard), and want to use a default parameter group, we recommend that you specify the parameter group by name.\n\n- To create a Redis OSS (cluster mode disabled) replication group, use `CacheParameterGroupName=default.redis3.2` .\n- To create a Redis OSS (cluster mode enabled) replication group, use `CacheParameterGroupName=default.redis3.2.cluster.on` .", "title": "CacheParameterGroupName", "type": "string" }, @@ -90930,7 +90922,7 @@ "type": "string" }, "ClusterMode": { - "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", + "markdownDescription": "Enabled or Disabled. To modify cluster mode from Disabled to Enabled, you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster mode configuration and set the cluster mode to Enabled. For more information, see [Modify cluster mode](https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/modify-cluster-mode.html) .", "title": "ClusterMode", "type": "string" }, @@ -90955,7 +90947,7 @@ "type": "string" }, "IpDiscovery": { - "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . 
IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "The network type you choose when creating a replication group, either `ipv4` | `ipv6` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "IpDiscovery", "type": "string" }, @@ -90978,7 +90970,7 @@ "type": "boolean" }, "NetworkType": { - "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", + "markdownDescription": "Must be either `ipv4` | `ipv6` | `dual_stack` . IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the [Nitro system](https://docs.aws.amazon.com/ec2/nitro/) .", "title": "NetworkType", "type": "string" }, @@ -90986,7 +90978,7 @@ "items": { "$ref": "#/definitions/AWS::ElastiCache::ReplicationGroup.NodeGroupConfiguration" }, - "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", + "markdownDescription": "`NodeGroupConfiguration` is a property of the `AWS::ElastiCache::ReplicationGroup` resource that configures an Amazon ElastiCache (ElastiCache) Redis OSS cluster node group.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NodeGroupConfiguration` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NodeGroupConfiguration` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .", "title": "NodeGroupConfiguration", "type": "array" }, @@ -91001,7 +90993,7 @@ "type": "number" }, "NumNodeGroups": { - "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. 
When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", + "markdownDescription": "An optional parameter that specifies the number of node groups (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS (cluster mode disabled) either omit this parameter or set it to 1.\n\nIf you set [UseOnlineResharding](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-updatepolicy.html#cfn-attributes-updatepolicy-useonlineresharding) to `true` , you can update `NumNodeGroups` without interruption. When `UseOnlineResharding` is set to `false` , or is not specified, updating `NumNodeGroups` results in [replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) .\n\nDefault: 1", "title": "NumNodeGroups", "type": "number" }, @@ -91055,7 +91047,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", + "markdownDescription": "A list of Amazon Resource Names (ARN) that uniquely identify the Redis OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. The new replication group will have the number of node groups (console: shards) specified by the parameter *NumNodeGroups* or the number of node groups configured by *NodeGroupConfiguration* regardless of the number of ARNs specified here.\n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "title": "SnapshotArns", "type": "array" }, @@ -91075,7 +91067,7 @@ "type": "string" }, "SnapshottingClusterId": { - "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.", + "markdownDescription": "The cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups.", "title": "SnapshottingClusterId", "type": "string" }, @@ -91088,12 +91080,12 @@ "type": "array" }, "TransitEncryptionEnabled": { - "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. 
To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using redis version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", + "markdownDescription": "A flag that enables in-transit encryption when set to `true` .\n\nYou cannot modify the value of `TransitEncryptionEnabled` after the cluster is created. To enable in-transit encryption on a cluster you must set `TransitEncryptionEnabled` to `true` when you create a cluster.\n\nThis parameter is valid only if the `Engine` parameter is `redis` , the `EngineVersion` parameter is `3.2.6` or `4.x` onward, and the cluster is being created in an Amazon VPC.\n\nIf you enable in-transit encryption, you must also specify a value for `CacheSubnetGroup` .\n\n*Required:* Only available when creating a replication group in an Amazon VPC using Redis OSS version `3.2.6` or `4.x` onward.\n\nDefault: `false`\n\n> For HIPAA compliance, you must specify `TransitEncryptionEnabled` as `true` , an `AuthToken` , and a `CacheSubnetGroup` .", "title": "TransitEncryptionEnabled", "type": "boolean" }, "TransitEncryptionMode": { - "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. Once you migrate all your Redis clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", + "markdownDescription": "A setting that allows you to migrate your clients to use in-transit encryption, with no downtime.\n\nWhen setting `TransitEncryptionEnabled` to `true` , you can set your `TransitEncryptionMode` to `preferred` in the same request, to allow both encrypted and unencrypted connections at the same time. 
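A minimal sketch of how the encryption flags discussed in these hunks fit together on a replication group; the engine version, node type, and subnet group name are illustrative assumptions.

```yaml
# Hypothetical CloudFormation fragment; AtRestEncryptionEnabled must be set at creation time.
ReplicationGroup:
  Type: AWS::ElastiCache::ReplicationGroup
  Properties:
    ReplicationGroupDescription: Encrypted Redis OSS replication group
    Engine: redis
    CacheNodeType: cache.t4g.medium
    NumNodeGroups: 1
    ReplicasPerNodeGroup: 1
    AutomaticFailoverEnabled: true
    CacheSubnetGroupName: my-cache-subnet-group   # required alongside in-transit encryption
    AtRestEncryptionEnabled: true                 # cannot be modified after creation
    TransitEncryptionEnabled: true
    TransitEncryptionMode: preferred              # step one; switch to required once all clients use TLS
```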
Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to `required` to allow encrypted connections only.\n\nSetting `TransitEncryptionMode` to `required` is a two-step process that requires you to first set the `TransitEncryptionMode` to `preferred` , after that you can set `TransitEncryptionMode` to `required` .\n\nThis process will not trigger the replacement of the replication group.", "title": "TransitEncryptionMode", "type": "string" }, @@ -91212,7 +91204,7 @@ "additionalProperties": false, "properties": { "NodeGroupId": { - "markdownDescription": "Either the ElastiCache for Redis supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", + "markdownDescription": "Either the ElastiCache (Redis OSS) supplied 4-digit id or a user supplied id for the node group these configuration values apply to.", "title": "NodeGroupId", "type": "string" }, @@ -91436,7 +91428,7 @@ "title": "CacheUsageLimits" }, "DailySnapshotTime": { - "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis OSS and Serverless Memcached only.", "title": "DailySnapshotTime", "type": "string" }, @@ -91497,7 +91489,7 @@ "type": "array" }, "SnapshotRetentionLimit": { - "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis and Serverless Memcached only.", + "markdownDescription": "The current setting for the number of serverless cache snapshots the system will retain. Available for Redis OSS and Serverless Memcached only.", "title": "SnapshotRetentionLimit", "type": "number" }, @@ -91518,7 +91510,7 @@ "type": "array" }, "UserGroupId": { - "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis only. Default is NULL.", + "markdownDescription": "The identifier of the user group associated with the serverless cache. Available for Redis OSS only. Default is NULL.", "title": "UserGroupId", "type": "string" } @@ -94255,7 +94247,7 @@ "items": { "$ref": "#/definitions/AWS::ElasticLoadBalancingV2::TargetGroup.TargetGroupAttribute" }, - "markdownDescription": "The attributes.", + "markdownDescription": "The target group attributes.", "title": "TargetGroupAttributes", "type": "array" }, @@ -95049,7 +95041,7 @@ }, "IdMappingTechniques": { "$ref": "#/definitions/AWS::EntityResolution::IdMappingWorkflow.IdMappingTechniques", - "markdownDescription": "An object which defines the `idMappingType` and the `providerProperties` .", + "markdownDescription": "An object which defines the ID mapping technique and any additional configurations.", "title": "IdMappingTechniques" }, "InputSourceConfig": { @@ -95136,7 +95128,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95146,7 +95138,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of ID namespace. 
There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95294,7 +95286,7 @@ "type": "array" }, "Type": { - "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` to which all `sourceIds` will resolve to.", + "markdownDescription": "The type of ID namespace. There are two types: `SOURCE` and `TARGET` .\n\nThe `SOURCE` contains configurations for `sourceId` data that will be processed in an ID mapping workflow.\n\nThe `TARGET` contains a configuration of `targetId` which all `sourceIds` will resolve to.", "title": "Type", "type": "string" } @@ -95349,7 +95341,7 @@ "additionalProperties": false, "properties": { "InputSourceARN": { - "markdownDescription": "An AWS Glue table ARN for the input source table.", + "markdownDescription": "An AWS Glue table Amazon Resource Name (ARN) or a matching workflow ARN for the input source table.", "title": "InputSourceARN", "type": "string" }, @@ -95668,7 +95660,7 @@ "additionalProperties": false, "properties": { "AttributeMatchingModel": { - "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the AttributeMatchingModel. When choosing `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` type. When choosing `ONE_TO_ONE` ,the system can only match if the sub-types are exact matches. For example, only when the value of the `Email` field of Profile A and the value of the `Email` field of Profile B matches, the two profiles are matched on the `Email` type.", + "markdownDescription": "The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .\n\nIf you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.\n\nIf you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.", "title": "AttributeMatchingModel", "type": "string" }, @@ -95889,7 +95881,7 @@ "type": "string" }, "MatchKey": { - "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group. For example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . 
By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group. If no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", + "markdownDescription": "A key that allows grouping of multiple input attributes into a unified matching group.\n\nFor example, consider a scenario where the source table contains various addresses, such as `business_address` and `shipping_address` . By assigning a `matchKey` called `address` to both attributes, AWS Entity Resolution will match records across these fields to create a consolidated matching group.\n\nIf no `matchKey` is specified for a column, it won't be utilized for matching purposes but will still be included in the output table.", "title": "MatchKey", "type": "string" }, @@ -99690,7 +99682,7 @@ "type": "boolean" }, "DataRepositoryPath": { - "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://myBucket/myPrefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", + "markdownDescription": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format `s3://bucket-name/prefix/` . This path specifies where in the S3 data repository files will be imported from or exported to.", "title": "DataRepositoryPath", "type": "string" }, @@ -103339,7 +103331,7 @@ "items": { "$ref": "#/definitions/AWS::GameLift::Fleet.LocationConfiguration" }, - "markdownDescription": "A set of remote locations to deploy additional instances to and manage as part of the fleet. This parameter can only be used when creating fleets in AWS Regions that support multiple locations. You can add any Amazon GameLift-supported AWS Region as a remote location, in the form of an AWS Region code, such as `us-west-2` or Local Zone code. To create a fleet with instances in the home Region only, don't set this parameter.\n\nWhen using this parameter, Amazon GameLift requires you to include your home location in the request.", + "markdownDescription": "A set of remote locations to deploy additional instances to and manage as a multi-location fleet. Use this parameter when creating a fleet in AWS Regions that support multiple locations. You can add any AWS Region or Local Zone that's supported by Amazon GameLift. Provide a list of one or more AWS Region codes, such as `us-west-2` , or Local Zone names. When using this parameter, Amazon GameLift requires you to include your home location in the request. For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Locations", "type": "array" }, @@ -103585,7 +103577,7 @@ "additionalProperties": false, "properties": { "Location": { - "markdownDescription": "An AWS Region code, such as `us-west-2` .", + "markdownDescription": "An AWS Region code, such as `us-west-2` . 
For a list of supported Regions and Local Zones, see [Amazon GameLift service locations](https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-regions.html) for managed hosting.", "title": "Location", "type": "string" }, @@ -104178,7 +104170,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of labels to assign to the new matchmaking configuration resource. Tags are developer-defined key-value pairs. Tagging AWS resources are useful for resource management, access management and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Rareference* .", + "markdownDescription": "A list of labels to assign to the new resource. Tags are developer-defined key-value pairs. Tagging AWS resources is useful for resource management, access management, and cost allocation. For more information, see [Tagging AWS Resources](https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the *AWS General Reference* .", "title": "Tags", "type": "array" } @@ -105354,7 +105346,7 @@ "type": "object" }, "ConnectionType": { - "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authencation.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters.
Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", + "markdownDescription": "The type of the connection. Currently, these types are supported:\n\n- `JDBC` - Designates a connection to a database through Java Database Connectivity (JDBC).\n\n`JDBC` Connections use the following ConnectionParameters.\n\n- Required: All of ( `HOST` , `PORT` , `JDBC_ENGINE` ) or `JDBC_CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- Optional: `JDBC_ENFORCE_SSL` , `CUSTOM_JDBC_CERT` , `CUSTOM_JDBC_CERT_STRING` , `SKIP_CUSTOM_JDBC_CERT_VALIDATION` . These parameters are used to configure SSL with JDBC.\n- `KAFKA` - Designates a connection to an Apache Kafka streaming platform.\n\n`KAFKA` Connections use the following ConnectionParameters.\n\n- Required: `KAFKA_BOOTSTRAP_SERVERS` .\n- Optional: `KAFKA_SSL_ENABLED` , `KAFKA_CUSTOM_CERT` , `KAFKA_SKIP_CUSTOM_CERT_VALIDATION` . These parameters are used to configure SSL with `KAFKA` .\n- Optional: `KAFKA_CLIENT_KEYSTORE` , `KAFKA_CLIENT_KEYSTORE_PASSWORD` , `KAFKA_CLIENT_KEY_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEYSTORE_PASSWORD` , `ENCRYPTED_KAFKA_CLIENT_KEY_PASSWORD` . These parameters are used to configure TLS client configuration with SSL in `KAFKA` .\n- Optional: `KAFKA_SASL_MECHANISM` . Can be specified as `SCRAM-SHA-512` , `GSSAPI` , or `AWS_MSK_IAM` .\n- Optional: `KAFKA_SASL_SCRAM_USERNAME` , `KAFKA_SASL_SCRAM_PASSWORD` , `ENCRYPTED_KAFKA_SASL_SCRAM_PASSWORD` . These parameters are used to configure SASL/SCRAM-SHA-512 authentication with `KAFKA` .\n- Optional: `KAFKA_SASL_GSSAPI_KEYTAB` , `KAFKA_SASL_GSSAPI_KRB5_CONF` , `KAFKA_SASL_GSSAPI_SERVICE` , `KAFKA_SASL_GSSAPI_PRINCIPAL` . 
These parameters are used to configure SASL/GSSAPI authentication with `KAFKA` .\n- `MONGODB` - Designates a connection to a MongoDB document database.\n\n`MONGODB` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTION_URL` .\n- Required: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `SALESFORCE` - Designates a connection to Salesforce using OAuth authentication.\n\n- Requires the `AuthenticationConfiguration` member to be configured.\n- `VIEW_VALIDATION_REDSHIFT` - Designates a connection used for view validation by Amazon Redshift.\n- `VIEW_VALIDATION_ATHENA` - Designates a connection used for view validation by Amazon Athena.\n- `NETWORK` - Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).\n\n`NETWORK` Connections do not require ConnectionParameters. Instead, provide a PhysicalConnectionRequirements.\n- `MARKETPLACE` - Uses configuration settings contained in a connector purchased from AWS Marketplace to read from and write to data stores that are not natively supported by AWS Glue .\n\n`MARKETPLACE` Connections use the following ConnectionParameters.\n\n- Required: `CONNECTOR_TYPE` , `CONNECTOR_URL` , `CONNECTOR_CLASS_NAME` , `CONNECTION_URL` .\n- Required for `JDBC` `CONNECTOR_TYPE` connections: All of ( `USERNAME` , `PASSWORD` ) or `SECRET_ID` .\n- `CUSTOM` - Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by AWS Glue .\n\n`SFTP` is not supported.\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue , consult [AWS Glue connection properties](https://docs.aws.amazon.com/glue/latest/dg/connection-defining.html) .\n\nFor more information about how optional ConnectionProperties are used to configure features in AWS Glue Studio, consult [Using connectors and connections](https://docs.aws.amazon.com/glue/latest/ug/connectors-chapter.html) .", "title": "ConnectionType", "type": "string" }, @@ -106957,7 +106949,7 @@ "title": "FindMatchesParameters" }, "TransformType": { - "markdownDescription": "The type of machine learning transform.
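To ground the `JDBC` case enumerated above, a connection might be declared as follows; the endpoint, secret name, subnet, and security group are illustrative assumptions.

```yaml
# Hypothetical CloudFormation fragment; SECRET_ID satisfies the USERNAME/PASSWORD-or-SECRET_ID requirement.
GlueJdbcConnection:
  Type: AWS::Glue::Connection
  Properties:
    CatalogId: !Ref AWS::AccountId
    ConnectionInput:
      Name: analytics-postgres
      ConnectionType: JDBC
      ConnectionProperties:
        JDBC_CONNECTION_URL: jdbc:postgresql://db.example.internal:5432/analytics
        SECRET_ID: analytics-db-credentials
        JDBC_ENFORCE_SSL: "true"
      PhysicalConnectionRequirements:
        SubnetId: subnet-0123456789abcdef0
        SecurityGroupIdList:
          - sg-0123456789abcdef0
```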
`FIND_MATCHES` is the only option.\n\nFor information about the types of machine learning transforms, see [Working with machine learning transforms](https://docs.aws.amazon.com/glue/latest/dg/console-machine-learning-transforms.html) .", "title": "TransformType", "type": "string" } @@ -112638,27 +112630,27 @@ "additionalProperties": false, "properties": { "AgentStatus": { - "markdownDescription": "", + "markdownDescription": "The status of AgentEndpoint.", "title": "AgentStatus", "type": "string" }, "AuditResults": { - "markdownDescription": "", + "markdownDescription": "The results of the audit.", "title": "AuditResults", "type": "string" }, "EgressAddress": { "$ref": "#/definitions/AWS::GroundStation::DataflowEndpointGroup.ConnectionDetails", - "markdownDescription": "", + "markdownDescription": "The egress address of AgentEndpoint.", "title": "EgressAddress" }, "IngressAddress": { "$ref": "#/definitions/AWS::GroundStation::DataflowEndpointGroup.RangedConnectionDetails", - "markdownDescription": "", + "markdownDescription": "The ingress address of AgentEndpoint.", "title": "IngressAddress" }, "Name": { - "markdownDescription": "", + "markdownDescription": "Name string associated with AgentEndpoint. Used as a human-readable identifier for AgentEndpoint.", "title": "Name", "type": "string" } @@ -112950,12 +112942,12 @@ "additionalProperties": false, "properties": { "KmsAliasArn": { - "markdownDescription": "", + "markdownDescription": "KMS Alias Arn.", "title": "KmsAliasArn", "type": "string" }, "KmsKeyArn": { - "markdownDescription": "", + "markdownDescription": "KMS Key Arn.", "title": "KmsKeyArn", "type": "string" } @@ -113250,7 +113242,7 @@ "type": "string" }, "DetectorId": { - "markdownDescription": "The ID of the detector belonging to the GuardDuty account that you want to create a filter for.", + "markdownDescription": "The detector ID associated with the GuardDuty account for which you want to create a filter.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113460,7 +113452,7 @@ "type": "boolean" }, "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty account that you want to create an IPSet for.", + "markdownDescription": "The unique ID of the detector of the GuardDuty account for which you want to create an IPSet.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113571,7 +113563,7 @@ "additionalProperties": false, "properties": { "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty member account.", + "markdownDescription": "The unique ID of the detector of the GuardDuty member account.\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -113746,7 +113738,7 @@ "type": "boolean" }, "DetectorId": { - "markdownDescription": "The unique ID of the detector of the GuardDuty account that you want to create a threatIntelSet for.", + "markdownDescription": "The unique ID of the detector of 
the GuardDuty account for which you want to create a `ThreatIntelSet` .\n\nTo find the `detectorId` in the current Region, see the\nSettings page in the GuardDuty console, or run the [ListDetectors](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_ListDetectors.html) API.", "title": "DetectorId", "type": "string" }, @@ -114591,7 +114583,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .", + "markdownDescription": "A list of certificate thumbprints that are associated with the specified IAM OIDC provider resource object. For more information, see [CreateOpenIDConnectProvider](https://docs.aws.amazon.com/IAM/latest/APIReference/API_CreateOpenIDConnectProvider.html) .\n\nThis property is optional. If it is not included, IAM will retrieve and use the top intermediate certificate authority (CA) thumbprint of the OpenID Connect identity provider server certificate.", "title": "ThumbprintList", "type": "array" }, @@ -116651,7 +116643,7 @@ "type": "string" }, "DisplayName": { - "markdownDescription": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.", + "markdownDescription": "The display name value for the group. The length limit is 1,024 characters. This value can consist of letters, accented characters, symbols, numbers, punctuation, tab, new line, carriage return, space, and nonbreaking space in this attribute. This value is specified at the time the group is created and stored as an attribute of the group object in the identity store.\n\nPrefix search supports a maximum of 1,000 characters for the string.", "title": "DisplayName", "type": "string" }, @@ -117679,7 +117671,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -117920,7 +117912,7 @@ "items": { "type": "string" }, - "markdownDescription": "Tags for Image Builder to apply to the output container image that &INS; scans. Tags can help you identify and manage your scanned images.", + "markdownDescription": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. 
Tags can help you identify and manage your scanned images.", "title": "ContainerTags", "type": "array" }, @@ -129666,7 +129658,7 @@ "type": "array" }, "AssetModelName": { - "markdownDescription": "A unique, friendly name for the asset model.", + "markdownDescription": "A unique name for the asset model.", "title": "AssetModelName", "type": "string" }, @@ -129818,7 +129810,7 @@ "additionalProperties": false, "properties": { "DataType": { - "markdownDescription": "The data type of the asset model property.", + "markdownDescription": "The data type of the asset model property.\n\nIf you specify `STRUCT` , you must also specify `dataTypeSpec` to identify the type of the structure for this property.", "title": "DataType", "type": "string" }, @@ -130201,7 +130193,7 @@ "type": "array" }, "GatewayName": { - "markdownDescription": "A unique, friendly name for the gateway.", + "markdownDescription": "A unique name for the gateway.", "title": "GatewayName", "type": "string" }, @@ -130280,7 +130272,7 @@ }, "SiemensIE": { "$ref": "#/definitions/AWS::IoTSiteWise::Gateway.SiemensIE", - "markdownDescription": "", + "markdownDescription": "An AWS IoT SiteWise Edge gateway that runs on a Siemens Industrial Edge Device.", "title": "SiemensIE" } }, @@ -130318,7 +130310,7 @@ "additionalProperties": false, "properties": { "IotCoreThingName": { - "markdownDescription": "", + "markdownDescription": "The name of the AWS IoT Thing for your AWS IoT SiteWise Edge gateway.", "title": "IotCoreThingName", "type": "string" } @@ -131630,7 +131622,7 @@ "type": "object" }, "WorkspaceId": { - "markdownDescription": "", + "markdownDescription": "The ID of the workspace.", "title": "WorkspaceId", "type": "string" } @@ -139167,12 +139159,12 @@ "title": "DeliveryStreamEncryptionConfigurationInput" }, "DeliveryStreamName": { - "markdownDescription": "The name of the delivery stream.", + "markdownDescription": "The name of the Firehose stream.", "title": "DeliveryStreamName", "type": "string" }, "DeliveryStreamType": { - "markdownDescription": "The delivery stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the delivery stream directly.\n- `KinesisStreamAsSource` : The delivery stream uses a Kinesis data stream as a source.", + "markdownDescription": "The Firehose stream type. This can be one of the following values:\n\n- `DirectPut` : Provider applications access the Firehose stream directly.\n- `KinesisStreamAsSource` : The Firehose stream uses a Kinesis data stream as a source.", "title": "DeliveryStreamType", "type": "string" }, @@ -139225,7 +139217,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A set of tags to assign to the delivery stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the delivery stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a delivery stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags.
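To make the new `SiemensIE` and `IotCoreThingName` descriptions concrete, here is a minimal sketch of an `AWS::IoTSiteWise::Gateway` on the Siemens Industrial Edge platform (both names are hypothetical):

```yaml
Resources:
  ExampleEdgeGateway:
    Type: AWS::IoTSiteWise::Gateway
    Properties:
      GatewayName: example-siemens-ie-gateway    # a unique name for the gateway
      GatewayPlatform:
        SiemensIE:
          IotCoreThingName: ExampleEdgeThing     # the AWS IoT thing backing the gateway
```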
If you do not provide this permission, requests to create new Firehose delivery streams with IAM resource tags will fail with an `AccessDeniedException` such as following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", + "markdownDescription": "A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can define and assign to AWS resources. Tags are metadata. For example, you can add friendly names and descriptions or other types of information that can help you distinguish the Firehose stream. For more information about tags, see [Using Cost Allocation Tags](https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) in the AWS Billing and Cost Management User Guide.\n\nYou can specify up to 50 tags when creating a Firehose stream.\n\nIf you specify tags in the `CreateDeliveryStream` action, Amazon Data Firehose performs an additional authorization on the `firehose:TagDeliveryStream` action to verify if users have permissions to create tags. If you do not provide this permission, requests to create new Firehose streams with IAM resource tags will fail with an `AccessDeniedException` such as the following.\n\n*AccessDeniedException*\n\nUser: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.\n\nFor an example IAM policy, see [Tag example.](https://docs.aws.amazon.com/firehose/latest/APIReference/API_CreateDeliveryStream.html#API_CreateDeliveryStream_Examples)", "title": "Tags", "type": "array" } @@ -139261,7 +139253,7 @@ "type": "number" }, "SizeInMBs": { - "markdownDescription": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.", + "markdownDescription": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.\n\nWe recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher.", "title": "SizeInMBs", "type": "number" } @@ -139759,7 +139751,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The Amazon CloudWatch logging options for your delivery stream.", + "markdownDescription": "The Amazon CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "CompressionFormat": { @@ -139818,7 +139810,7 @@ "title": "S3BackupConfiguration" }, "S3BackupMode": { - "markdownDescription": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled.
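The reworded Firehose descriptions above cover `DeliveryStreamType` and `Tags` together; here is a minimal `DirectPut` sketch with one tag (the bucket ARN is a placeholder and `FirehoseDeliveryRole` is a hypothetical IAM role defined elsewhere in the template):

```yaml
Resources:
  ExampleFirehoseStream:
    Type: AWS::KinesisFirehose::DeliveryStream
    Properties:
      DeliveryStreamName: example-stream
      DeliveryStreamType: DirectPut              # producers write to the stream directly
      S3DestinationConfiguration:
        BucketARN: arn:aws:s3:::DOC-EXAMPLE-BUCKET
        RoleARN: !GetAtt FirehoseDeliveryRole.Arn
      Tags:                                      # needs firehose:TagDeliveryStream
        - Key: team
          Value: data-platform
```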
If backup is enabled, you can't update the delivery stream to disable it.", + "markdownDescription": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.", "title": "S3BackupMode", "type": "string" } @@ -140231,7 +140223,7 @@ "properties": { "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The CloudWatch logging options for your delivery stream.", + "markdownDescription": "The CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "ClusterJDBCURL": { @@ -140270,7 +140262,7 @@ "title": "S3BackupConfiguration" }, "S3BackupMode": { - "markdownDescription": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it.", + "markdownDescription": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it.", "title": "S3BackupMode", "type": "string" }, @@ -140332,7 +140324,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The CloudWatch logging options for your delivery stream.", + "markdownDescription": "The CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "CompressionFormat": { @@ -140464,7 +140456,7 @@ }, "ProcessingConfiguration": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.ProcessingConfiguration", - "markdownDescription": "", + "markdownDescription": "Specifies configuration for Snowflake.", "title": "ProcessingConfiguration" }, "RetryOptions": { @@ -140592,7 +140584,7 @@ }, "CloudWatchLoggingOptions": { "$ref": "#/definitions/AWS::KinesisFirehose::DeliveryStream.CloudWatchLoggingOptions", - "markdownDescription": "The Amazon CloudWatch logging options for your delivery stream.", + "markdownDescription": "The Amazon CloudWatch logging options for your Firehose stream.", "title": "CloudWatchLoggingOptions" }, "HECAcknowledgmentTimeoutInSeconds": { @@ -142639,7 +142631,7 @@ "items": { "type": "string" }, - "markdownDescription": "(Streams and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", + "markdownDescription": "(Kinesis, DynamoDB Streams, and SQS) A list of current response type enums applied to the event source mapping.\n\nValid Values: `ReportBatchItemFailures`", "title": "FunctionResponseTypes", "type": "array" }, @@ -142931,7 +142923,7 @@ }, "Code": { "$ref": "#/definitions/AWS::Lambda::Function.Code", - "markdownDescription": "The code for the function.", + "markdownDescription": "The code for the function. You can define your function code in multiple ways:\n\n- For .zip deployment packages, you can specify the Amazon S3 location of the .zip file in the `S3Bucket` , `S3Key` , and `S3ObjectVersion` properties.\n- For .zip deployment packages, you can alternatively define the function code inline in the `ZipFile` property. 
This method works only for Node.js and Python functions.\n- For container images, specify the URI of your container image in the Amazon ECR registry in the `ImageUri` property.", "title": "Code" }, "CodeSigningConfigArn": { @@ -142983,7 +142975,7 @@ "title": "ImageConfig" }, "KmsKeyArn": { - "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key is to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry (Amazon ECR).\nIf you don't provide a customer managed key, Lambda uses a default service key.", + "markdownDescription": "The ARN of the AWS Key Management Service ( AWS KMS ) customer managed key that's used to encrypt your function's [environment variables](https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-encryption) . When [Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart-security.html) is activated, Lambda also uses this key to encrypt your function's snapshot. If you deploy your function using a container image, Lambda also uses this key to encrypt your function when it's deployed. Note that this is not the same key that's used to protect your container image in the Amazon Elastic Container Registry ( Amazon ECR ). If you don't provide a customer managed key, Lambda uses a default service key.", "title": "KmsKeyArn", "type": "string" }, @@ -143039,7 +143031,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.", + "markdownDescription": "A list of [tags](https://docs.aws.amazon.com/lambda/latest/dg/tagging.html) to apply to the function.\n\n> You must have the `lambda:TagResource` , `lambda:UntagResource` , and `lambda:ListTags` permissions for your [IAM principal](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html) to manage the AWS CloudFormation stack. If you don't have these permissions, there might be unexpected behavior with stack-level tags propagating to the resource during resource creation and update.", "title": "Tags", "type": "array" }, @@ -143582,7 +143574,7 @@ "type": "string" }, "Principal": { - "markdownDescription": "The AWS service or AWS account that invokes the function. If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", + "markdownDescription": "The AWS service , AWS account , IAM user, or IAM role that invokes the function.
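The expanded `Code` description above lists three packaging options. The inline `ZipFile` route (Node.js and Python only) looks like this minimal sketch, with a hypothetical execution role:

```yaml
Resources:
  InlineFunction:
    Type: AWS::Lambda::Function
    Properties:
      Runtime: python3.12
      Handler: index.handler
      Role: !GetAtt FunctionRole.Arn   # hypothetical AWS::IAM::Role defined elsewhere
      Code:
        ZipFile: |
          def handler(event, context):
              # trivial body so the sketch stays self-contained
              return {"statusCode": 200}
```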
If you specify a service, use `SourceArn` or `SourceAccount` to limit who can invoke the function through that service.", "title": "Principal", "type": "string" }, @@ -143686,7 +143678,7 @@ "type": "string" }, "TargetFunctionArn": { - "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* - `123456789012:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", + "markdownDescription": "The name of the Lambda function.\n\n**Name formats** - *Function name* - `my-function` .\n- *Function ARN* - `arn:aws:lambda:us-west-2:123456789012:function:my-function` .\n- *Partial ARN* - `123456789012:function:my-function` .\n\nThe length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.", "title": "TargetFunctionArn", "type": "string" } @@ -144204,7 +144196,7 @@ }, "VoiceSettings": { "$ref": "#/definitions/AWS::Lex::Bot.VoiceSettings", - "markdownDescription": "Defines settings for using an Amazon Polly voice to communicate with a user.", + "markdownDescription": "Defines settings for using an Amazon Polly voice to communicate with a user.\n\nValid values include:\n\n- `standard`\n- `neural`\n- `long-form`\n- `generative`", "title": "VoiceSettings" } }, @@ -150405,7 +150397,7 @@ "type": "array" }, "Name": { - "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `/ *folder-name* / *query-name*` .", + "markdownDescription": "A name for the query definition.\n\n> You can use the name to create a folder structure for your queries. To create a folder, use a forward slash (/) to prefix your desired query name with your desired folder name. For example, `*folder-name* / *query-name*` .", "title": "Name", "type": "string" }, @@ -151997,67 +151989,67 @@ "properties": { "BrokerNodeGroupInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.BrokerNodeGroupInfo", - "markdownDescription": "Information about the broker nodes in the cluster.", + "markdownDescription": "", "title": "BrokerNodeGroupInfo" }, "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::Cluster.ClientAuthentication", - "markdownDescription": "Includes all client authentication related information.", + "markdownDescription": "", "title": "ClientAuthentication" }, "ClusterName": { - "markdownDescription": "The name of the cluster.", + "markdownDescription": "", "title": "ClusterName", "type": "string" }, "ConfigurationInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.ConfigurationInfo", - "markdownDescription": "Represents the configuration that you want MSK to use for the cluster.", + "markdownDescription": "", "title": "ConfigurationInfo" }, "CurrentVersion": { - "markdownDescription": "The version of the cluster that you want to update.", + "markdownDescription": "", "title": "CurrentVersion", "type": "string" }, "EncryptionInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionInfo", - "markdownDescription": "Includes all encryption-related information.", + "markdownDescription": "", "title": "EncryptionInfo" }, "EnhancedMonitoring": { - "markdownDescription": "Specifies the level of monitoring for the MSK cluster.
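The broadened `Principal` description above pairs naturally with `SourceArn` and `SourceAccount` scoping. A minimal sketch that lets Amazon S3 (a service principal) invoke a function for one bucket; the function logical ID and bucket name are placeholders:

```yaml
Resources:
  AllowS3Invoke:
    Type: AWS::Lambda::Permission
    Properties:
      FunctionName: !Ref InlineFunction      # hypothetical function in the same template
      Action: lambda:InvokeFunction
      Principal: s3.amazonaws.com
      SourceAccount: !Ref AWS::AccountId     # limits who can invoke through S3
      SourceArn: arn:aws:s3:::DOC-EXAMPLE-BUCKET
```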
The possible values are `DEFAULT` , `PER_BROKER` , and `PER_TOPIC_PER_BROKER` .", + "markdownDescription": "", "title": "EnhancedMonitoring", "type": "string" }, "KafkaVersion": { - "markdownDescription": "The version of Apache Kafka. You can use Amazon MSK to create clusters that use Apache Kafka versions 1.1.1 and 2.2.1.", + "markdownDescription": "", "title": "KafkaVersion", "type": "string" }, "LoggingInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.LoggingInfo", - "markdownDescription": "Logging Info details.", + "markdownDescription": "", "title": "LoggingInfo" }, "NumberOfBrokerNodes": { - "markdownDescription": "The number of broker nodes in the cluster.", + "markdownDescription": "", "title": "NumberOfBrokerNodes", "type": "number" }, "OpenMonitoring": { "$ref": "#/definitions/AWS::MSK::Cluster.OpenMonitoring", - "markdownDescription": "The settings for open monitoring.", + "markdownDescription": "", "title": "OpenMonitoring" }, "StorageMode": { - "markdownDescription": "This controls storage mode for supported storage tiers.", + "markdownDescription": "", "title": "StorageMode", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Create tags when creating the cluster.", + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -152101,17 +152093,17 @@ "properties": { "CloudWatchLogs": { "$ref": "#/definitions/AWS::MSK::Cluster.CloudWatchLogs", - "markdownDescription": "Details of the CloudWatch Logs destination for broker logs.", + "markdownDescription": "", "title": "CloudWatchLogs" }, "Firehose": { "$ref": "#/definitions/AWS::MSK::Cluster.Firehose", - "markdownDescription": "Details of the Kinesis Data Firehose delivery stream that is the destination for broker logs.", + "markdownDescription": "", "title": "Firehose" }, "S3": { "$ref": "#/definitions/AWS::MSK::Cluster.S3", - "markdownDescription": "Details of the Amazon S3 destination for broker logs.", + "markdownDescription": "", "title": "S3" } }, @@ -152121,7 +152113,7 @@ "additionalProperties": false, "properties": { "BrokerAZDistribution": { - "markdownDescription": "This parameter is currently not in use.", + "markdownDescription": "", "title": "BrokerAZDistribution", "type": "string" }, @@ -152129,13 +152121,13 @@ "items": { "type": "string" }, - "markdownDescription": "The list of subnets to connect to in the client virtual private cloud (VPC). Amazon creates elastic network interfaces inside these subnets. Client applications use elastic network interfaces to produce and consume data.\n\nIf you use the US West (N. California) Region, specify exactly two subnets. For other Regions where Amazon MSK is available, you can specify either two or three subnets. The subnets that you specify must be in distinct Availability Zones. When you create a cluster, Amazon MSK distributes the broker nodes evenly across the subnets that you specify.\n\nClient subnets can't occupy the Availability Zone with ID `use1-az3` .", + "markdownDescription": "", "title": "ClientSubnets", "type": "array" }, "ConnectivityInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.ConnectivityInfo", - "markdownDescription": "Information about the cluster's connectivity setting.", + "markdownDescription": "", "title": "ConnectivityInfo" }, "InstanceType": { @@ -152147,13 +152139,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups to associate with the elastic network interfaces in order to specify who can connect to and communicate with the Amazon MSK cluster. 
If you don't specify a security group, Amazon MSK uses the default security group associated with the VPC. If you specify security groups that were shared with you, you must ensure that you have permissions to them. Specifically, you need the `ec2:DescribeSecurityGroups` permission.", + "markdownDescription": "", "title": "SecurityGroups", "type": "array" }, "StorageInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.StorageInfo", - "markdownDescription": "Contains information about storage volumes attached to Amazon MSK broker nodes.", + "markdownDescription": "", "title": "StorageInfo" } }, @@ -152168,17 +152160,17 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::Cluster.Sasl", - "markdownDescription": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true.", + "markdownDescription": "", "title": "Sasl" }, "Tls": { "$ref": "#/definitions/AWS::MSK::Cluster.Tls", - "markdownDescription": "Details for ClientAuthentication using TLS. To turn on TLS access control, you must also turn on `EncryptionInTransit` by setting `inCluster` to true and `clientBroker` to `TLS` .", + "markdownDescription": "", "title": "Tls" }, "Unauthenticated": { "$ref": "#/definitions/AWS::MSK::Cluster.Unauthenticated", - "markdownDescription": "Details for ClientAuthentication using no authentication.", + "markdownDescription": "", "title": "Unauthenticated" } }, @@ -152188,12 +152180,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified CloudWatch Logs destination.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "LogGroup": { - "markdownDescription": "The CloudWatch log group that is the destination for broker logs.", + "markdownDescription": "", "title": "LogGroup", "type": "string" } @@ -152207,12 +152199,12 @@ "additionalProperties": false, "properties": { "Arn": { - "markdownDescription": "ARN of the configuration to use.", + "markdownDescription": "", "title": "Arn", "type": "string" }, "Revision": { - "markdownDescription": "The revision of the configuration to use.", + "markdownDescription": "", "title": "Revision", "type": "number" } @@ -152228,12 +152220,12 @@ "properties": { "PublicAccess": { "$ref": "#/definitions/AWS::MSK::Cluster.PublicAccess", - "markdownDescription": "Access control settings for the cluster's brokers.", + "markdownDescription": "", "title": "PublicAccess" }, "VpcConnectivity": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivity", - "markdownDescription": "VPC connection control settings for brokers", + "markdownDescription": "", "title": "VpcConnectivity" } }, @@ -152244,11 +152236,11 @@ "properties": { "ProvisionedThroughput": { "$ref": "#/definitions/AWS::MSK::Cluster.ProvisionedThroughput", - "markdownDescription": "EBS volume provisioned throughput information.", + "markdownDescription": "", "title": "ProvisionedThroughput" }, "VolumeSize": { - "markdownDescription": "The size in GiB of the EBS volume for the data drive on each broker node.", + "markdownDescription": "", "title": "VolumeSize", "type": "number" } @@ -152259,7 +152251,7 @@ "additionalProperties": false, "properties": { "DataVolumeKMSKeyId": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon KMS key for encrypting data at rest. 
If you don't specify a KMS key, MSK creates one for you and uses it.", + "markdownDescription": "", "title": "DataVolumeKMSKeyId", "type": "string" } @@ -152273,12 +152265,12 @@ "additionalProperties": false, "properties": { "ClientBroker": { - "markdownDescription": "Indicates the encryption setting for data in transit between clients and brokers. You must set it to one of the following values.\n\n`TLS` means that client-broker communication is enabled with TLS only.\n\n`TLS_PLAINTEXT` means that client-broker communication is enabled for both TLS-encrypted, as well as plaintext data.\n\n`PLAINTEXT` means that client-broker communication is enabled in plaintext only.\n\nThe default value is `TLS` .", + "markdownDescription": "", "title": "ClientBroker", "type": "string" }, "InCluster": { - "markdownDescription": "When set to true, it indicates that data communication among the broker nodes of the cluster is encrypted. When set to false, the communication happens in plaintext.\n\nThe default value is true.", + "markdownDescription": "", "title": "InCluster", "type": "boolean" } @@ -152290,12 +152282,12 @@ "properties": { "EncryptionAtRest": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionAtRest", - "markdownDescription": "The data-volume encryption details.", + "markdownDescription": "", "title": "EncryptionAtRest" }, "EncryptionInTransit": { "$ref": "#/definitions/AWS::MSK::Cluster.EncryptionInTransit", - "markdownDescription": "The details for encryption in transit.", + "markdownDescription": "", "title": "EncryptionInTransit" } }, @@ -152305,12 +152297,12 @@ "additionalProperties": false, "properties": { "DeliveryStream": { - "markdownDescription": "The Kinesis Data Firehose delivery stream that is the destination for broker logs.", + "markdownDescription": "", "title": "DeliveryStream", "type": "string" }, "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified Kinesis Data Firehose delivery stream.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152324,7 +152316,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152338,7 +152330,7 @@ "additionalProperties": false, "properties": { "EnabledInBroker": { - "markdownDescription": "Indicates whether you want to enable or disable the JMX Exporter.", + "markdownDescription": "", "title": "EnabledInBroker", "type": "boolean" } @@ -152353,7 +152345,7 @@ "properties": { "BrokerLogs": { "$ref": "#/definitions/AWS::MSK::Cluster.BrokerLogs", - "markdownDescription": "You can configure your MSK cluster to send broker logs to different destination types. 
This configuration specifies the details of these destinations.", + "markdownDescription": "", "title": "BrokerLogs" } }, @@ -152366,7 +152358,7 @@ "additionalProperties": false, "properties": { "EnabledInBroker": { - "markdownDescription": "Indicates whether you want to enable or disable the Node Exporter.", + "markdownDescription": "", "title": "EnabledInBroker", "type": "boolean" } @@ -152381,7 +152373,7 @@ "properties": { "Prometheus": { "$ref": "#/definitions/AWS::MSK::Cluster.Prometheus", - "markdownDescription": "Prometheus exporter settings.", + "markdownDescription": "", "title": "Prometheus" } }, @@ -152395,12 +152387,12 @@ "properties": { "JmxExporter": { "$ref": "#/definitions/AWS::MSK::Cluster.JmxExporter", - "markdownDescription": "Indicates whether you want to enable or disable the JMX Exporter.", + "markdownDescription": "", "title": "JmxExporter" }, "NodeExporter": { "$ref": "#/definitions/AWS::MSK::Cluster.NodeExporter", - "markdownDescription": "Indicates whether you want to enable or disable the Node Exporter.", + "markdownDescription": "", "title": "NodeExporter" } }, @@ -152410,12 +152402,12 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Provisioned throughput is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "VolumeThroughput": { - "markdownDescription": "Throughput value of the EBS volumes for the data drive on each kafka broker node in MiB per second.", + "markdownDescription": "", "title": "VolumeThroughput", "type": "number" } @@ -152426,7 +152418,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "DISABLED means that public access is turned off. SERVICE_PROVIDED_EIPS means that public access is turned on.", + "markdownDescription": "", "title": "Type", "type": "string" } @@ -152437,17 +152429,17 @@ "additionalProperties": false, "properties": { "Bucket": { - "markdownDescription": "The name of the S3 bucket that is the destination for broker logs.", + "markdownDescription": "", "title": "Bucket", "type": "string" }, "Enabled": { - "markdownDescription": "Specifies whether broker logs get sent to the specified Amazon S3 destination.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" }, "Prefix": { - "markdownDescription": "The S3 prefix that is the destination for broker logs.", + "markdownDescription": "", "title": "Prefix", "type": "string" } @@ -152462,12 +152454,12 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::Cluster.Iam", - "markdownDescription": "Details for ClientAuthentication using IAM.", + "markdownDescription": "", "title": "Iam" }, "Scram": { "$ref": "#/definitions/AWS::MSK::Cluster.Scram", - "markdownDescription": "Details for SASL/SCRAM client authentication.", + "markdownDescription": "", "title": "Scram" } }, @@ -152477,7 +152469,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/SCRAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152492,7 +152484,7 @@ "properties": { "EBSStorageInfo": { "$ref": "#/definitions/AWS::MSK::Cluster.EBSStorageInfo", - "markdownDescription": "EBS volume information.", + "markdownDescription": "", "title": "EBSStorageInfo" } }, @@ -152505,12 +152497,12 @@ "items": { "type": "string" }, - "markdownDescription": "List of AWS Private CA Amazon Resource Name (ARN)s.", + "markdownDescription": "", "title": "CertificateAuthorityArnList", "type": "array" }, "Enabled": { - 
"markdownDescription": "TLS authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152521,7 +152513,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "Unauthenticated is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152536,7 +152528,7 @@ "properties": { "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityClientAuthentication", - "markdownDescription": "VPC connection control settings for brokers.", + "markdownDescription": "", "title": "ClientAuthentication" } }, @@ -152547,12 +152539,12 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivitySasl", - "markdownDescription": "Details for VpcConnectivity ClientAuthentication using SASL.", + "markdownDescription": "", "title": "Sasl" }, "Tls": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityTls", - "markdownDescription": "Details for VpcConnectivity ClientAuthentication using TLS.", + "markdownDescription": "", "title": "Tls" } }, @@ -152562,7 +152554,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152577,12 +152569,12 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityIam", - "markdownDescription": "Details for ClientAuthentication using IAM for VpcConnectivity.", + "markdownDescription": "", "title": "Iam" }, "Scram": { "$ref": "#/definitions/AWS::MSK::Cluster.VpcConnectivityScram", - "markdownDescription": "Details for SASL/SCRAM client authentication for VpcConnectivity.", + "markdownDescription": "", "title": "Scram" } }, @@ -152592,7 +152584,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/SCRAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152606,7 +152598,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "TLS authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -152725,7 +152717,7 @@ "additionalProperties": false, "properties": { "Description": { - "markdownDescription": "The description of the configuration.", + "markdownDescription": "", "title": "Description", "type": "string" }, @@ -152739,16 +152731,16 @@ }, "LatestRevision": { "$ref": "#/definitions/AWS::MSK::Configuration.LatestRevision", - "markdownDescription": "Latest revision of the configuration.", + "markdownDescription": "", "title": "LatestRevision" }, "Name": { - "markdownDescription": "The name of the configuration. Configuration names are strings that match the regex \"^[0-9A-Za-z][0-9A-Za-z-]{0,}$\".", + "markdownDescription": "", "title": "Name", "type": "string" }, "ServerProperties": { - "markdownDescription": "Contents of the server.properties file. When using the API, you must ensure that the contents of the file are base64 encoded. 
When using the console, the SDK, or the CLI, the contents of server.properties can be in plaintext.", + "markdownDescription": "", "title": "ServerProperties", "type": "string" } @@ -152837,12 +152829,12 @@ "additionalProperties": false, "properties": { "CurrentVersion": { - "markdownDescription": "", + "markdownDescription": "The current version number of the replicator.", "title": "CurrentVersion", "type": "string" }, "Description": { - "markdownDescription": "", + "markdownDescription": "A summary description of the replicator.", "title": "Description", "type": "string" }, @@ -152850,7 +152842,7 @@ "items": { "$ref": "#/definitions/AWS::MSK::Replicator.KafkaCluster" }, - "markdownDescription": "", + "markdownDescription": "Kafka Clusters to use in setting up sources / targets for replication.", "title": "KafkaClusters", "type": "array" }, @@ -152858,17 +152850,17 @@ "items": { "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationInfo" }, - "markdownDescription": "", + "markdownDescription": "A list of replication configurations, where each configuration targets a given source cluster to target cluster replication flow.", "title": "ReplicationInfoList", "type": "array" }, "ReplicatorName": { - "markdownDescription": "", + "markdownDescription": "The name of the replicator. Alpha-numeric characters with '-' are allowed.", "title": "ReplicatorName", "type": "string" }, "ServiceExecutionRoleArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the IAM role used by the replicator to access resources in the customer's account (e.g., source and target clusters).", "title": "ServiceExecutionRoleArn", "type": "string" }, @@ -152876,7 +152868,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "List of tags to attach to created Replicator.", "title": "Tags", "type": "array" } @@ -152914,7 +152906,7 @@ "additionalProperties": false, "properties": { "MskClusterArn": { - "markdownDescription": "", + "markdownDescription": "The Amazon Resource Name (ARN) of an Amazon MSK cluster.", "title": "MskClusterArn", "type": "string" } @@ -152931,7 +152923,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the consumer groups that should not be replicated.", "title": "ConsumerGroupsToExclude", "type": "array" }, @@ -152939,17 +152931,17 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the consumer groups to copy.", "title": "ConsumerGroupsToReplicate", "type": "array" }, "DetectAndCopyNewConsumerGroups": { - "markdownDescription": "", + "markdownDescription": "Enables synchronization of consumer groups to target cluster.", "title": "DetectAndCopyNewConsumerGroups", "type": "boolean" }, "SynchroniseConsumerGroupOffsets": { - "markdownDescription": "", + "markdownDescription": "Enables synchronization of consumer group offsets to target cluster.
The translated offsets will be written to topic __consumer_offsets.", "title": "SynchroniseConsumerGroupOffsets", "type": "boolean" } @@ -152964,12 +152956,12 @@ "properties": { "AmazonMskCluster": { "$ref": "#/definitions/AWS::MSK::Replicator.AmazonMskCluster", - "markdownDescription": "", + "markdownDescription": "Details of an Amazon MSK Cluster.", "title": "AmazonMskCluster" }, "VpcConfig": { "$ref": "#/definitions/AWS::MSK::Replicator.KafkaClusterClientVpcConfig", - "markdownDescription": "", + "markdownDescription": "Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.", "title": "VpcConfig" } }, @@ -152986,7 +152978,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The security groups to attach to the ENIs for the broker nodes.", "title": "SecurityGroupIds", "type": "array" }, @@ -152994,7 +152986,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "The list of subnets in the client VPC to connect to.", "title": "SubnetIds", "type": "array" } @@ -153009,27 +153001,27 @@ "properties": { "ConsumerGroupReplication": { "$ref": "#/definitions/AWS::MSK::Replicator.ConsumerGroupReplication", - "markdownDescription": "", + "markdownDescription": "Configuration relating to consumer group replication.", "title": "ConsumerGroupReplication" }, "SourceKafkaClusterArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the source Kafka cluster.", "title": "SourceKafkaClusterArn", "type": "string" }, "TargetCompressionType": { - "markdownDescription": "", + "markdownDescription": "The compression type to use when producing records to target cluster.", "title": "TargetCompressionType", "type": "string" }, "TargetKafkaClusterArn": { - "markdownDescription": "", + "markdownDescription": "The ARN of the target Kafka cluster.", "title": "TargetKafkaClusterArn", "type": "string" }, "TopicReplication": { "$ref": "#/definitions/AWS::MSK::Replicator.TopicReplication", - "markdownDescription": "", + "markdownDescription": "Configuration relating to topic replication.", "title": "TopicReplication" } }, @@ -153046,7 +153038,7 @@ "additionalProperties": false, "properties": { "Type": { - "markdownDescription": "", + "markdownDescription": "The type of replication starting position.", "title": "Type", "type": "string" } @@ -153057,30 +153049,30 @@ "additionalProperties": false, "properties": { "CopyAccessControlListsForTopics": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically configure remote topic ACLs to match their corresponding upstream topics.", "title": "CopyAccessControlListsForTopics", "type": "boolean" }, "CopyTopicConfigurations": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically configure remote topics to match their corresponding upstream topics.", "title": "CopyTopicConfigurations", "type": "boolean" }, "DetectAndCopyNewTopics": { - "markdownDescription": "", + "markdownDescription": "Whether to periodically check for new topics and partitions.", "title": "DetectAndCopyNewTopics", "type": "boolean" }, "StartingPosition": { "$ref": "#/definitions/AWS::MSK::Replicator.ReplicationStartingPosition", - "markdownDescription": "", + "markdownDescription": "Specifies the position in the topics to start replicating from.", "title": "StartingPosition" }, "TopicsToExclude": { "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the topics 
that should not be replicated.", "title": "TopicsToExclude", "type": "array" }, @@ -153088,7 +153080,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "List of regular expression patterns indicating the topics to copy.", "title": "TopicsToReplicate", "type": "array" } @@ -153135,7 +153127,7 @@ "properties": { "ClientAuthentication": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.ClientAuthentication", - "markdownDescription": "Includes all client authentication information.", + "markdownDescription": "", "title": "ClientAuthentication" }, "ClusterName": { @@ -153196,7 +153188,7 @@ "properties": { "Sasl": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.Sasl", - "markdownDescription": "Details for client authentication using SASL. To turn on SASL, you must also turn on `EncryptionInTransit` by setting `inCluster` to true. You must set `clientBroker` to either `TLS` or `TLS_PLAINTEXT` . If you choose `TLS_PLAINTEXT` , then you must also set `unauthenticated` to true.", + "markdownDescription": "", "title": "Sasl" } }, @@ -153209,7 +153201,7 @@ "additionalProperties": false, "properties": { "Enabled": { - "markdownDescription": "SASL/IAM authentication is enabled or not.", + "markdownDescription": "", "title": "Enabled", "type": "boolean" } @@ -153224,7 +153216,7 @@ "properties": { "Iam": { "$ref": "#/definitions/AWS::MSK::ServerlessCluster.Iam", - "markdownDescription": "Details for ClientAuthentication using IAM.", + "markdownDescription": "", "title": "Iam" } }, @@ -153302,7 +153294,7 @@ "items": { "type": "string" }, - "markdownDescription": "The list of subnets in the client VPC to connect to.", + "markdownDescription": "", "title": "ClientSubnets", "type": "array" }, @@ -153310,13 +153302,13 @@ "items": { "type": "string" }, - "markdownDescription": "The security groups to attach to the ENIs for the broker nodes.", + "markdownDescription": "", "title": "SecurityGroups", "type": "array" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Create tags when creating the VPC connection.", + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -153326,12 +153318,12 @@ "type": "object" }, "TargetClusterArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the cluster.", + "markdownDescription": "", "title": "TargetClusterArn", "type": "string" }, "VpcId": { - "markdownDescription": "The VPC id of the remote client.", + "markdownDescription": "", "title": "VpcId", "type": "string" } @@ -153407,7 +153399,7 @@ "type": "object" }, "AirflowVersion": { - "markdownDescription": "The version of Apache Airflow to use for the environment. If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` (latest)", + "markdownDescription": "The version of Apache Airflow to use for the environment. 
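Read together, the new `ReplicationInfo`, `TopicReplication`, and `ConsumerGroupReplication` descriptions above describe one source-to-target flow. A minimal, hypothetical `AWS::MSK::Replicator` sketch (every ARN, subnet, and security group here is a placeholder):

```yaml
Resources:
  ExampleReplicator:
    Type: AWS::MSK::Replicator
    Properties:
      ReplicatorName: example-replicator
      ServiceExecutionRoleArn: arn:aws:iam::111122223333:role/example-replicator-role
      KafkaClusters:                 # one source and one target cluster
        - AmazonMskCluster:
            MskClusterArn: arn:aws:kafka:us-east-1:111122223333:cluster/source/abc
          VpcConfig:
            SubnetIds: [subnet-1111, subnet-2222]
            SecurityGroupIds: [sg-3333]
        - AmazonMskCluster:
            MskClusterArn: arn:aws:kafka:us-east-1:111122223333:cluster/target/def
          VpcConfig:
            SubnetIds: [subnet-4444, subnet-5555]
            SecurityGroupIds: [sg-6666]
      ReplicationInfoList:
        - SourceKafkaClusterArn: arn:aws:kafka:us-east-1:111122223333:cluster/source/abc
          TargetKafkaClusterArn: arn:aws:kafka:us-east-1:111122223333:cluster/target/def
          TargetCompressionType: NONE
          TopicReplication:
            TopicsToReplicate: [".*"]          # regular-expression patterns
          ConsumerGroupReplication:
            ConsumerGroupsToReplicate: [".*"]
```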
If no value is specified, defaults to the latest version.\n\nIf you specify a newer version number for an existing environment, the version update requires some service interruption before taking effect.\n\n*Allowed Values* : `1.10.12` | `2.0.2` | `2.2.2` | `2.4.3` | `2.5.1` | `2.6.3` | `2.7.2` | `2.8.1` | `2.9.2` (latest)", "title": "AirflowVersion", "type": "string" }, @@ -161720,8 +161712,6 @@ "additionalProperties": false, "properties": { "ChannelId": { - "markdownDescription": "The unique ID of the channel.", - "title": "ChannelId", "type": "string" }, "MultiplexId": { @@ -166064,7 +166054,7 @@ "type": "boolean" }, "KmsKeyId": { - "markdownDescription": "If `StorageEncrypted` is true, the Amazon KMS key identifier for the encrypted DB cluster.", + "markdownDescription": "The Amazon Resource Name (ARN) of the KMS key that is used to encrypt the database instances in the DB cluster, such as `arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef` . If you enable the `StorageEncrypted` property but don't specify this property, the default KMS key is used. If you specify this property, you must set the `StorageEncrypted` property to `true` .", "title": "KmsKeyId", "type": "string" }, @@ -166104,7 +166094,7 @@ "type": "string" }, "StorageEncrypted": { - "markdownDescription": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `DBClusterIdentifier` , `DBSnapshotIdentifier` , or `SourceDBInstanceIdentifier` property, don't specify this property. The value is inherited from the cluster, snapshot, or source DB instance. If you specify the `KmsKeyId` property, you must enable encryption.\n\nIf you specify the `KmsKeyId` , you must enable encryption by setting `StorageEncrypted` to true.", + "markdownDescription": "Indicates whether the DB cluster is encrypted.\n\nIf you specify the `KmsKeyId` property, then you must enable encryption and set this property to `true` .\n\nIf you enable the `StorageEncrypted` property but don't specify the `KmsKeyId` property, then the default KMS key is used. If you specify the `KmsKeyId` property, then that KMS key is used to encrypt the database instances in the DB cluster.\n\nIf you specify the `SourceDBClusterIdentifier` property, and don't specify this property or disable it, the value is inherited from the source DB cluster. If the source DB cluster is encrypted, the `KmsKeyId` property from the source cluster is used.\n\nIf you specify the `DBSnapshotIdentifier` and don't specify this property or disable it, the value is inherited from the snapshot and the specified `KmsKeyId` property from the snapshot is used.", "title": "StorageEncrypted", "type": "boolean" }, @@ -167328,7 +167318,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . 
The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` .\n\nThe following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -167343,7 +167333,7 @@ "type": "string" }, "LogType": { - "markdownDescription": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs.", + "markdownDescription": "The type of log to record. You can record the following types of logs from your Network Firewall stateful engine.\n\n- `ALERT` - Logs for traffic that matches your stateful rules and that have an action that sends an alert. A stateful rule sends alerts for the rule actions DROP, ALERT, and REJECT. For more information, see the `StatefulRule` property.\n- `FLOW` - Standard network traffic flow logs. The stateful rules engine records flow logs for all network traffic that it receives. Each flow log record captures the network flow for a specific standard stateless rule group.\n- `TLS` - Logs for events that are related to TLS inspection. For more information, see [Inspecting SSL/TLS traffic with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-configurations.html) in the *Network Firewall Developer Guide* .", "title": "LogType", "type": "string" } @@ -174896,7 +174886,7 @@ "additionalProperties": false, "properties": { "Content": { - "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. 
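The reworked `LogDestination` and `LogType` descriptions above map one-to-one onto a logging configuration. A minimal sketch sending `ALERT` logs to S3 (the firewall and bucket are placeholders):

```yaml
Resources:
  FirewallAlertLogging:
    Type: AWS::NetworkFirewall::LoggingConfiguration
    Properties:
      FirewallArn: !GetAtt ExampleFirewall.FirewallArn   # hypothetical firewall
      LoggingConfiguration:
        LogDestinationConfigs:
          - LogType: ALERT                     # FLOW and TLS are the other options
            LogDestinationType: S3
            LogDestination:                    # key:value map per destination type
              bucketName: DOC-EXAMPLE-BUCKET
              prefix: alerts
```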
The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- AI services opt-out policies: 2,500 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", + "markdownDescription": "The policy text content. You can specify the policy content as a JSON object or a JSON string.\n\n> When you specify the policy content as a JSON string, you can't perform drift detection on the CloudFormation stack. For this reason, we recommend specifying the policy content as a JSON object instead. \n\nThe text that you supply must adhere to the rules of the policy type you specify in the `Type` parameter. The following AWS Organizations quotas are enforced for the maximum size of a policy document:\n\n- Service control policies: 5,120 characters\n- Backup policies: 10,000 characters\n- Tag policies: 10,000 characters\n- Chatbot policies: 10,000 characters\n- AI services opt-out policies: 2,500 characters\n\nFor more information about Organizations service quotas, see [Quotas for AWS Organizations](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) in the *AWS Organizations User Guide* .", "title": "Content", "type": "object" }, @@ -181959,22 +181949,22 @@ "type": "number" }, "MaximumRecordAgeInSeconds": { - "markdownDescription": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", + "markdownDescription": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", "title": "MaximumRecordAgeInSeconds", "type": "number" }, "MaximumRetryAttempts": { - "markdownDescription": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "markdownDescription": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", "title": "MaximumRetryAttempts", "type": "number" }, "OnPartialBatchItemFailure": { - "markdownDescription": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", + "markdownDescription": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", "title": "OnPartialBatchItemFailure", "type": "string" }, "ParallelizationFactor": { - "markdownDescription": "(Streams only) The number of batches to process concurrently from each shard.
The default value is 1.", + "markdownDescription": "The number of batches to process concurrently from each shard. The default value is 1.", "title": "ParallelizationFactor", "type": "number" }, @@ -182008,27 +181998,27 @@ "type": "number" }, "MaximumRecordAgeInSeconds": { - "markdownDescription": "(Streams only) Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", + "markdownDescription": "Discard records older than the specified age. The default value is -1, which sets the maximum age to infinite. When the value is set to infinite, EventBridge never discards old records.", "title": "MaximumRecordAgeInSeconds", "type": "number" }, "MaximumRetryAttempts": { - "markdownDescription": "(Streams only) Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", + "markdownDescription": "Discard records after the specified number of retries. The default value is -1, which sets the maximum number of retries to infinite. When MaximumRetryAttempts is infinite, EventBridge retries failed records until the record expires in the event source.", "title": "MaximumRetryAttempts", "type": "number" }, "OnPartialBatchItemFailure": { - "markdownDescription": "(Streams only) Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retry each half until all the records are processed or there is one failed message left in the batch.", + "markdownDescription": "Define how to handle item process failures. `AUTOMATIC_BISECT` halves each batch and retries each half until all the records are processed or there is one failed message left in the batch.", "title": "OnPartialBatchItemFailure", "type": "string" }, "ParallelizationFactor": { - "markdownDescription": "(Streams only) The number of batches to process concurrently from each shard.
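With the "(Streams only)" qualifiers dropped above, these retry and batching knobs read the same for each stream source. A minimal, hypothetical `AWS::Pipes::Pipe` sketch wiring a DynamoDB stream to an SQS queue with those settings (the role, table, and queue are assumed to exist elsewhere in the template):

```yaml
Resources:
  ExamplePipe:
    Type: AWS::Pipes::Pipe
    Properties:
      Name: example-pipe
      RoleArn: !GetAtt PipeRole.Arn            # hypothetical IAM role
      Source: !GetAtt OrdersTable.StreamArn    # hypothetical DynamoDB stream
      SourceParameters:
        DynamoDBStreamParameters:
          StartingPosition: LATEST
          MaximumRecordAgeInSeconds: 3600      # discard records older than an hour
          MaximumRetryAttempts: 3
          OnPartialBatchItemFailure: AUTOMATIC_BISECT
          ParallelizationFactor: 1             # one batch at a time per shard
      Target: !GetAtt OrdersQueue.Arn          # hypothetical SQS queue
```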
The default value is 1.", "title": "ParallelizationFactor", "type": "number" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182067,7 +182057,7 @@ "type": "number" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182200,7 +182190,7 @@ "type": "string" }, "StartingPosition": { - "markdownDescription": "(Streams only) The position in a stream from which to start reading.", + "markdownDescription": "The position in a stream from which to start reading.", "title": "StartingPosition", "type": "string" }, @@ -182693,7 +182683,7 @@ "type": "string" }, "OutputFormat": { - "markdownDescription": "The format EventBridge uses for the log records.\n\n- `json` : JSON\n- `plain` : Plain text\n- `w3c` : [W3C extended logging file format](https://docs.aws.amazon.com/https://www.w3.org/TR/WD-logfile)", + "markdownDescription": "The format EventBridge uses for the log records.\n\nEventBridge currently only supports `json` formatting.", "title": "OutputFormat", "type": "string" }, @@ -182758,7 +182748,7 @@ "items": { "type": "string" }, - "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups. If you do not specify a security group, the default security group for the VPC is used.", + "markdownDescription": "Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many as five security groups.", "title": "SecurityGroup", "type": "array" }, @@ -224578,7 +224568,7 @@ "type": "array" }, "BacktrackWindow": { - "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to 0.\n\n> Currently, Backtrack is only supported for Aurora MySQL DB clusters. \n\nDefault: 0\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).\n\nValid for: Aurora MySQL DB clusters only", + "markdownDescription": "The target backtrack window, in seconds. To disable backtracking, set this value to `0` .\n\nValid for Cluster Type: Aurora MySQL DB clusters only\n\nDefault: `0`\n\nConstraints:\n\n- If specified, this value must be set to a number from 0 to 259,200 (72 hours).", "title": "BacktrackWindow", "type": "number" }, @@ -224761,7 +224751,7 @@ "type": "string" }, "PubliclyAccessible": { - "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. 
That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", + "markdownDescription": "Specifies whether the DB cluster is publicly accessible.\n\nWhen the DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP address. When you connect from within the same VPC as the DB cluster, the endpoint resolves to the private IP address. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.\n\nWhen the DB cluster isn't publicly accessible, it is an internal DB cluster with a DNS name that resolves to a private IP address.\n\nValid for Cluster Type: Multi-AZ DB clusters only\n\nDefault: The default behavior varies depending on whether `DBSubnetGroupName` is specified.\n\nIf `DBSubnetGroupName` isn't specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the default VPC in the target Region doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the default VPC in the target Region has an internet gateway attached to it, the DB cluster is public.\n\nIf `DBSubnetGroupName` is specified, and `PubliclyAccessible` isn't specified, the following applies:\n\n- If the subnets are part of a VPC that doesn\u2019t have an internet gateway attached to it, the DB cluster is private.\n- If the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster is public.", "title": "PubliclyAccessible", "type": "boolean" }, @@ -224819,7 +224809,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster.\n\nValid for: Aurora DB clusters and Multi-AZ DB clusters", + "markdownDescription": "Tags to assign to the DB cluster.\n\nValid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters", "title": "Tags", "type": "array" }, @@ -224903,7 +224893,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. 
For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html#aws-resource-rds-dbcluster-return-values) .", "title": "SecretArn", "type": "string" } @@ -225009,17 +224999,17 @@ "additionalProperties": false, "properties": { "DBClusterParameterGroupName": { - "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\nIf you don't specify a value for `DBClusterParameterGroupName` property, a name is automatically created for the DB cluster parameter group.\n\n> This value is stored as a lowercase string.", + "markdownDescription": "The name of the DB cluster parameter group.\n\nConstraints:\n\n- Must not match the name of an existing DB cluster parameter group.\n\n> This value is stored as a lowercase string.", "title": "DBClusterParameterGroupName", "type": "string" }, "Description": { - "markdownDescription": "A friendly description for this DB cluster parameter group.", + "markdownDescription": "The description for the DB cluster parameter group.", "title": "Description", "type": "string" }, "Family": { - "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a DB engine and engine version compatible with that DB cluster parameter group family.\n\n> The DB cluster parameter group family can't be changed when updating a DB cluster parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBClusterParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBClusterParameterGroup.html)` .", + "markdownDescription": "The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.\n\n*Aurora MySQL*\n\nExample: `aurora-mysql5.7` , `aurora-mysql8.0`\n\n*Aurora PostgreSQL*\n\nExample: `aurora-postgresql14`\n\n*RDS for MySQL*\n\nExample: `mysql8.0`\n\n*RDS for PostgreSQL*\n\nExample: `postgres13`\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the Aurora PostgreSQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine aurora-postgresql`\n\n> The output contains duplicates. 
\n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `mysql`\n- `postgres`", "title": "Family", "type": "string" }, @@ -225032,7 +225022,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB cluster parameter group.", + "markdownDescription": "Tags to assign to the DB cluster parameter group.", "title": "Tags", "type": "array" } @@ -225129,7 +225119,7 @@ "type": "string" }, "AutomaticBackupReplicationRegion": { - "markdownDescription": "", + "markdownDescription": "The AWS Region associated with the automated backup.", "title": "AutomaticBackupReplicationRegion", "type": "string" }, @@ -225174,7 +225164,7 @@ "type": "string" }, "DBClusterIdentifier": { - "markdownDescription": "The identifier of the DB cluster that the instance will belong to.", + "markdownDescription": "The identifier of the DB cluster that this DB instance will belong to.\n\nThis setting doesn't apply to RDS Custom DB instances.", "title": "DBClusterIdentifier", "type": "string" }, @@ -225212,12 +225202,12 @@ "type": "array" }, "DBSnapshotIdentifier": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `DeleteAutomatedBackups`\n- `EnablePerformanceInsights`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PerformanceInsightsKMSKeyId`\n- `PerformanceInsightsRetentionPeriod`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an encrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. 
Snapshot restore is managed by the DB cluster.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the DB snapshot that's used to restore the DB instance. If you're restoring from a shared manual DB snapshot, you must specify the ARN of the snapshot.\n\nBy specifying this property, you can create a DB instance from the specified DB snapshot. If the `DBSnapshotIdentifier` property is an empty string or the `AWS::RDS::DBInstance` declaration has no `DBSnapshotIdentifier` property, AWS CloudFormation creates a new database. If the property contains a value (other than an empty string), AWS CloudFormation creates a database from the specified snapshot. If a snapshot with the specified name doesn't exist, AWS CloudFormation can't create the database and it rolls back the stack.\n\nSome DB instance properties aren't valid when you restore from a snapshot, such as the `MasterUsername` and `MasterUserPassword` properties. For information about the properties that you can specify, see the `RestoreDBInstanceFromDBSnapshot` action in the *Amazon RDS API Reference* .\n\nAfter you restore a DB instance with a `DBSnapshotIdentifier` property, you must specify the same `DBSnapshotIdentifier` property for any future updates to the DB instance. When you specify this property for an update, the DB instance is not restored from the DB snapshot again, and the data in the database is not changed. However, if you don't specify the `DBSnapshotIdentifier` property, an empty DB instance is created, and the original DB instance is deleted. If you specify a property that is different from the previous snapshot restore property, a new DB instance is restored from the specified `DBSnapshotIdentifier` property, and the original DB instance is deleted.\n\nIf you specify the `DBSnapshotIdentifier` property to restore a DB instance (as opposed to specifying it for DB instance updates), then don't specify the following properties:\n\n- `CharacterSetName`\n- `DBClusterIdentifier`\n- `DBName`\n- `KmsKeyId`\n- `MasterUsername`\n- `MasterUserPassword`\n- `PromotionTier`\n- `SourceDBInstanceIdentifier`\n- `SourceRegion`\n- `StorageEncrypted` (for an unencrypted snapshot)\n- `Timezone`\n\n*Amazon Aurora*\n\nNot applicable. Snapshot restore is managed by the DB cluster.", "title": "DBSnapshotIdentifier", "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Using Amazon RDS with Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. The DB subnet group is managed by the DB cluster. If specified, the setting must match the DB cluster setting.", + "markdownDescription": "A DB subnet group to associate with the DB instance. If you update this value, the new subnet group must be a subnet group in a new VPC.\n\nIf there's no DB subnet group, then the DB instance isn't a VPC DB instance.\n\nFor more information about using Amazon RDS in a VPC, see [Amazon VPC and Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html) in the *Amazon RDS User Guide* .\n\nThis setting doesn't apply to Amazon Aurora DB instances. The DB subnet group is managed by the DB cluster. 
If specified, the setting must match the DB cluster setting.", "title": "DBSubnetGroupName", "type": "string" }, @@ -225232,7 +225222,7 @@ "type": "boolean" }, "DeletionProtection": { - "markdownDescription": "A value that indicates whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\n*Amazon Aurora*\n\nNot applicable. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", + "markdownDescription": "Specifies whether the DB instance has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection isn't enabled. For more information, see [Deleting a DB Instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html) .\n\nThis setting doesn't apply to Amazon Aurora DB instances. You can enable or disable deletion protection for the DB cluster. For more information, see `CreateDBCluster` . DB instances in a DB cluster can be deleted even when deletion protection is enabled for the DB cluster.", "title": "DeletionProtection", "type": "boolean" }, @@ -225343,7 +225333,7 @@ "type": "number" }, "MonitoringInterval": { - "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify 0. The default is 0.\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than 0.\n\nThis setting doesn't apply to RDS Custom.\n\nValid Values: `0, 1, 5, 10, 15, 30, 60`", + "markdownDescription": "The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collection of Enhanced Monitoring metrics, specify `0` .\n\nIf `MonitoringRoleArn` is specified, then you must set `MonitoringInterval` to a value other than `0` .\n\nThis setting doesn't apply to RDS Custom DB instances.\n\nValid Values: `0 | 1 | 5 | 10 | 15 | 30 | 60`\n\nDefault: `0`", "title": "MonitoringInterval", "type": "number" }, @@ -225353,7 +225343,7 @@ "type": "string" }, "MultiAZ": { - "markdownDescription": "Specifies whether the database instance is a Multi-AZ DB instance deployment. You can't set the `AvailabilityZone` parameter if the `MultiAZ` parameter is set to true.\n\nFor more information, see [Multi-AZ deployments for high availability](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.MultiAZ.html) in the *Amazon RDS User Guide* .\n\n*Amazon Aurora*\n\nNot applicable. Amazon Aurora storage is replicated across all of the Availability Zones and doesn't require the `MultiAZ` option to be set.", + "markdownDescription": "Specifies whether the DB instance is a Multi-AZ deployment. 
You can't set the `AvailabilityZone` parameter if the DB instance is a Multi-AZ deployment.\n\nThis setting doesn't apply to the following DB instances:\n\n- Amazon Aurora (DB instance Availability Zones (AZs) are managed by the DB cluster.)\n- RDS Custom", "title": "MultiAZ", "type": "boolean" }, @@ -225383,7 +225373,7 @@ "type": "number" }, "Port": { - "markdownDescription": "The port number on which the database accepts connections.\n\n*Amazon Aurora*\n\nNot applicable. The port number is managed by the DB cluster.\n\n*Db2*\n\nDefault value: `50000`", + "markdownDescription": "The port number on which the database accepts connections.\n\nThis setting doesn't apply to Aurora DB instances. The port number is managed by the cluster.\n\nValid Values: `1150-65535`\n\nDefault:\n\n- RDS for Db2 - `50000`\n- RDS for MariaDB - `3306`\n- RDS for Microsoft SQL Server - `1433`\n- RDS for MySQL - `3306`\n- RDS for Oracle - `1521`\n- RDS for PostgreSQL - `5432`\n\nConstraints:\n\n- For RDS for Microsoft SQL Server, the value can't be `1234` , `1434` , `3260` , `3343` , `3389` , `47001` , or `49152-49156` .", "title": "Port", "type": "string" }, @@ -225421,7 +225411,7 @@ "type": "string" }, "RestoreTime": { - "markdownDescription": "The date and time to restore from.\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", + "markdownDescription": "The date and time to restore from. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must be a time in Universal Coordinated Time (UTC) format.\n- Must be before the latest restorable time for the DB instance.\n- Can't be specified if the `UseLatestRestorableTime` parameter is enabled.\n\nExample: `2009-09-07T23:45:00Z`", "title": "RestoreTime", "type": "string" }, @@ -225469,7 +225459,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB instance.", + "markdownDescription": "Tags to assign to the DB instance.", "title": "Tags", "type": "array" }, @@ -225484,7 +225474,7 @@ "type": "boolean" }, "UseLatestRestorableTime": { - "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time.\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", + "markdownDescription": "Specifies whether the DB instance is restored from the latest backup time. By default, the DB instance isn't restored from the latest backup time. This parameter applies to point-in-time recovery. For more information, see [Restoring a DB instance to a specified time](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PIT.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Can't be specified if the `RestoreTime` parameter is provided.", "title": "UseLatestRestorableTime", "type": "boolean" }, @@ -225585,7 +225575,7 @@ "type": "string" }, "SecretArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the secret.", + "markdownDescription": "The Amazon Resource Name (ARN) of the secret. 
This parameter is a return value that you can retrieve using the `Fn::GetAtt` intrinsic function. For more information, see [Return values](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbinstance.html#aws-resource-rds-dbinstance-return-values) .", "title": "SecretArn", "type": "string" } @@ -225654,12 +225644,12 @@ "type": "string" }, "Family": { - "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a DB engine and engine version compatible with that DB parameter group family.\n\n> The DB parameter group family can't be changed when updating a DB parameter group. \n\nTo list all of the available parameter group families, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\"`\n\nThe output contains duplicates.\n\nFor more information, see `[CreateDBParameterGroup](https://docs.aws.amazon.com//AmazonRDS/latest/APIReference/API_CreateDBParameterGroup.html)` .", + "markdownDescription": "The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.\n\nTo list all of the available parameter group families for a DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine `\n\nFor example, to list all of the available parameter group families for the MySQL DB engine, use the following command:\n\n`aws rds describe-db-engine-versions --query \"DBEngineVersions[].DBParameterGroupFamily\" --engine mysql`\n\n> The output contains duplicates. \n\nThe following are the valid DB engine values:\n\n- `aurora-mysql`\n- `aurora-postgresql`\n- `db2-ae`\n- `db2-se`\n- `mysql`\n- `oracle-ee`\n- `oracle-ee-cdb`\n- `oracle-se2`\n- `oracle-se2-cdb`\n- `postgres`\n- `sqlserver-ee`\n- `sqlserver-se`\n- `sqlserver-ex`\n- `sqlserver-web`", "title": "Family", "type": "string" }, "Parameters": { - "markdownDescription": "An array of parameter names and values for the parameter update. At least one parameter name and value must be supplied. Subsequent arguments are optional.\n\nRDS for Db2 requires you to bring your own Db2 license. You must enter your IBM customer ID ( `rds.ibm_customer_id` ) and site number ( `rds.ibm_site_id` ) before starting a Db2 instance.\n\nFor more information about DB parameters and DB parameter groups for Amazon RDS DB engines, see [Working with DB Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* .\n\nFor more information about DB cluster and DB instance parameters and parameter groups for Amazon Aurora DB engines, see [Working with DB Parameter Groups and DB Cluster Parameter Groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", + "markdownDescription": "An array of parameter names and values for the parameter update. 
You must specify at least one parameter name and value.\n\nFor more information about parameter groups, see [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html) in the *Amazon RDS User Guide* , or [Working with parameter groups](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_WorkingWithParamGroups.html) in the *Amazon Aurora User Guide* .\n\n> AWS CloudFormation doesn't support specifying an apply method for each individual parameter. The default apply method for each parameter is used.", "title": "Parameters", "type": "object" }, @@ -225667,7 +225657,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB parameter group.\n\n> Currently, this is the only property that supports drift detection.", + "markdownDescription": "Tags to assign to the DB parameter group.", "title": "Tags", "type": "array" } @@ -225753,7 +225743,7 @@ "type": "boolean" }, "EngineFamily": { - "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .\n\n*Valid Values* : `MYSQL` | `POSTGRESQL` | `SQLSERVER`", + "markdownDescription": "The kinds of databases that the proxy can connect to. This value determines which database network protocol the proxy recognizes when it interprets network traffic to and from the database. For Aurora MySQL, RDS for MariaDB, and RDS for MySQL databases, specify `MYSQL` . For Aurora PostgreSQL and RDS for PostgreSQL databases, specify `POSTGRESQL` . For RDS for Microsoft SQL Server, specify `SQLSERVER` .", "title": "EngineFamily", "type": "string" }, @@ -225831,7 +225821,7 @@ "additionalProperties": false, "properties": { "AuthScheme": { - "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.\n\nValid Values: `SECRETS`", + "markdownDescription": "The type of authentication that the proxy uses for connections from the proxy to the underlying database.", "title": "AuthScheme", "type": "string" }, @@ -225846,7 +225836,7 @@ "type": "string" }, "IAMAuth": { - "markdownDescription": "Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.\n\nValid Values: `ENABLED | DISABLED | REQUIRED`", + "markdownDescription": "A value that indicates whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. The `ENABLED` value is valid only for proxies with RDS for Microsoft SQL Server.", "title": "IAMAuth", "type": "string" }, @@ -225862,12 +225852,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A key is the required name of the tag. The string value can be 1-128 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. 
The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -225928,7 +225918,7 @@ "type": "array" }, "TargetRole": { - "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.\n\nValid Values: `READ_WRITE | READ_ONLY`", + "markdownDescription": "A value that indicates whether the DB proxy endpoint can be used for read/write or read-only operations.", "title": "TargetRole", "type": "string" }, @@ -225981,12 +225971,12 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "A value is the optional value of the tag. The string value can be 1-256 Unicode characters in length and can't be prefixed with `aws:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").", + "markdownDescription": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Key", "type": "string" }, "Value": { - "markdownDescription": "Metadata assigned to a DB instance consisting of a key-value pair.", + "markdownDescription": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . 
The string can contain only the set of Unicode letters, digits, white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]*)$\").", "title": "Value", "type": "string" } @@ -226030,7 +226020,7 @@ "properties": { "ConnectionPoolConfigurationInfo": { "$ref": "#/definitions/AWS::RDS::DBProxyTargetGroup.ConnectionPoolConfigurationInfoFormat", - "markdownDescription": "Settings that control the size and behavior of the connection pool associated with a `DBProxyTargetGroup` .", + "markdownDescription": "Displays the settings that control the size and behavior of the connection pool associated with a `DBProxyTarget` .", "title": "ConnectionPoolConfigurationInfo" }, "DBClusterIdentifiers": { @@ -226091,7 +226081,7 @@ "additionalProperties": false, "properties": { "ConnectionBorrowTimeout": { - "markdownDescription": "The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions. For an unlimited wait time, specify `0` .\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.", + "markdownDescription": "The number of seconds for a proxy to wait for a connection to become available in the connection pool. This setting only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.\n\nDefault: `120`\n\nConstraints:\n\n- Must be between 0 and 3600.", "title": "ConnectionBorrowTimeout", "type": "number" }, @@ -226165,7 +226155,7 @@ "type": "array" }, "EC2VpcId": { - "markdownDescription": "The identifier of an Amazon VPC. This property indicates the VPC that this DB security group belongs to.\n\n> The `EC2VpcId` property is for backward compatibility with older regions, and is no longer recommended for providing security information to an RDS DB instance.", + "markdownDescription": "The identifier of an Amazon virtual private cloud (VPC). This property indicates the VPC that this DB security group belongs to.\n\n> This property is included for backwards compatibility and is no longer recommended for providing security information to an RDS DB instance.", "title": "EC2VpcId", "type": "string" }, @@ -226178,7 +226168,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB security group.", + "markdownDescription": "Metadata assigned to an Amazon RDS resource consisting of a key-value pair.\n\nFor more information, see [Tagging Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide* or [Tagging Amazon Aurora and Amazon RDS resources](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_Tagging.html) in the *Amazon Aurora User Guide* .", "title": "Tags", "type": "array" } @@ -226364,7 +226354,7 @@ "type": "string" }, "DBSubnetGroupName": { - "markdownDescription": "The name for the DB subnet group. This value is stored as a lowercase string.\n\nConstraints: Must contain no more than 255 lowercase alphanumeric characters or hyphens. Must not be \"Default\".\n\nExample: `mysubnetgroup`", + "markdownDescription": "The name for the DB subnet group. 
This value is stored as a lowercase string.\n\nConstraints:\n\n- Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.\n- Must not be default.\n- First character must be a letter.\n\nExample: `mydbsubnetgroup`", "title": "DBSubnetGroupName", "type": "string" }, @@ -226380,7 +226370,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this DB subnet group.", + "markdownDescription": "Tags to assign to the DB subnet group.", "title": "Tags", "type": "array" } @@ -226469,12 +226459,12 @@ "items": { "type": "string" }, - "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If a `SourceIds` value is supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.", + "markdownDescription": "The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens. It can't end with a hyphen or contain two consecutive hyphens.\n\nConstraints:\n\n- If `SourceIds` are supplied, `SourceType` must also be provided.\n- If the source type is a DB instance, a `DBInstanceIdentifier` value must be supplied.\n- If the source type is a DB cluster, a `DBClusterIdentifier` value must be supplied.\n- If the source type is a DB parameter group, a `DBParameterGroupName` value must be supplied.\n- If the source type is a DB security group, a `DBSecurityGroupName` value must be supplied.\n- If the source type is a DB snapshot, a `DBSnapshotIdentifier` value must be supplied.\n- If the source type is a DB cluster snapshot, a `DBClusterSnapshotIdentifier` value must be supplied.\n- If the source type is an RDS Proxy, a `DBProxyName` value must be supplied.", "title": "SourceIds", "type": "array" }, "SourceType": { - "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, set this parameter to `db-instance` . If this value isn't specified, all events are returned.\n\nValid values: `db-instance` | `db-cluster` | `db-parameter-group` | `db-security-group` | `db-snapshot` | `db-cluster-snapshot`", + "markdownDescription": "The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you set this parameter to `db-instance` . For RDS Proxy events, specify `db-proxy` . 
If this value isn't specified, all events are returned.\n\nValid Values: `db-instance | db-cluster | db-parameter-group | db-security-group | db-snapshot | db-cluster-snapshot | db-proxy | zero-etl | custom-engine-version | blue-green-deployment`", "title": "SourceType", "type": "string" }, @@ -226681,7 +226671,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tags. For more information, see [Tagging Amazon RDS Resources](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in the *Amazon RDS User Guide.* .", + "markdownDescription": "An optional array of key-value pairs to apply to this integration.", "title": "Tags", "type": "array" }, @@ -226767,7 +226757,7 @@ "items": { "$ref": "#/definitions/AWS::RDS::OptionGroup.OptionConfiguration" }, - "markdownDescription": "A list of options and the settings for each option.", + "markdownDescription": "A list of all available options for an option group.", "title": "OptionConfigurations", "type": "array" }, @@ -226785,7 +226775,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "An optional array of key-value pairs to apply to this option group.", + "markdownDescription": "Tags to assign to the option group.", "title": "Tags", "type": "array" } @@ -226825,7 +226815,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of DBSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of DB security groups used for this option.", "title": "DBSecurityGroupMemberships", "type": "array" }, @@ -226856,7 +226846,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of VpcSecurityGroupMembership name strings used for this option.", + "markdownDescription": "A list of VPC security group names used for this option.", "title": "VpcSecurityGroupMemberships", "type": "array" } @@ -234971,7 +234961,7 @@ "type": "string" }, "Protocol": { - "markdownDescription": "The protocols for the Resolver endpoints. DoH-FIPS is applicable for inbound endpoints only.\n\nFor an inbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 and DoH-FIPS in combination.\n- Do53 alone.\n- DoH alone.\n- DoH-FIPS alone.\n- None, which is treated as Do53.\n\nFor an outbound endpoint you can apply the protocols as follows:\n\n- Do53 and DoH in combination.\n- Do53 alone.\n- DoH alone.\n- None, which is treated as Do53.", + "markdownDescription": "The protocols for the target address. The protocol you choose needs to be supported by the outbound endpoint of the Resolver rule.", "title": "Protocol", "type": "string" } @@ -235610,7 +235600,7 @@ }, "VersioningConfiguration": { "$ref": "#/definitions/AWS::S3::Bucket.VersioningConfiguration", - "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.", + "markdownDescription": "Enables multiple versions of all objects in this bucket. You might enable versioning to prevent objects from being deleted or overwritten by mistake or to archive objects so that you can retrieve previous versions of them.\n\n> When you enable versioning on a bucket for the first time, it might take a short amount of time for the change to be fully propagated. 
We recommend that you wait for 15 minutes after enabling versioning before issuing write operations ( `PUT` or `DELETE` ) on objects in the bucket.", "title": "VersioningConfiguration" }, "WebsiteConfiguration": { @@ -236057,7 +236047,7 @@ "items": { "$ref": "#/definitions/AWS::S3::Bucket.Rule" }, - "markdownDescription": "A lifecycle rule for individual objects in an Amazon S3 bucket.", + "markdownDescription": "Specifies lifecycle configuration rules for an Amazon S3 bucket.", "title": "Rules", "type": "array" } @@ -236758,12 +236748,12 @@ "additionalProperties": false, "properties": { "KMSMasterKeyID": { - "markdownDescription": "AWS Key Management Service (KMS) customer AWS KMS key ID to use for the default encryption. This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n\nIf you are using encryption with cross-account or AWS service operations you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> Amazon S3 only supports symmetric encryption KMS keys. For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", + "markdownDescription": "AWS Key Management Service (KMS) customer managed key ID to use for the default encryption.\n\n> - *General purpose buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` or `aws:kms:dsse` .\n> - *Directory buckets* - This parameter is allowed if and only if `SSEAlgorithm` is set to `aws:kms` . \n\nYou can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.\n\n- Key ID: `1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key ARN: `arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab`\n- Key Alias: `alias/alias-name`\n\nIf you are using encryption with cross-account or AWS service operations, you must use a fully qualified KMS key ARN. For more information, see [Using encryption for cross-account operations](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) .\n\n> - *General purpose buckets* - If you're specifying a customer managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key alias instead, then AWS KMS resolves the key within the requester\u2019s account. This behavior can result in data that's encrypted with a KMS key that belongs to the requester, and not the bucket owner. Also, if you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.\n> - *Directory buckets* - When you specify an [AWS KMS customer managed key](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk) for encryption in your directory bucket, only use the key ID or key ARN. The key alias format of the KMS key isn't supported. > Amazon S3 only supports symmetric encryption KMS keys. 
For more information, see [Asymmetric keys in AWS KMS](https://docs.aws.amazon.com//kms/latest/developerguide/symmetric-asymmetric.html) in the *AWS Key Management Service Developer Guide* .", "title": "KMSMasterKeyID", "type": "string" }, "SSEAlgorithm": { - "markdownDescription": "Server-side encryption algorithm to use for the default encryption.", + "markdownDescription": "Server-side encryption algorithm to use for the default encryption.\n\n> For directory buckets, there are only two supported values for server-side encryption: `AES256` and `aws:kms` .", "title": "SSEAlgorithm", "type": "string" } @@ -238077,7 +238067,7 @@ "additionalProperties": false, "properties": { "BucketName": { - "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*DOC-EXAMPLE-BUCKET* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", + "markdownDescription": "A name for the bucket. The bucket name must contain only lowercase letters, numbers, and hyphens (-). A directory bucket name must be unique in the chosen Availability Zone. The bucket name must also follow the format `*bucket_base_name* -- *az_id* --x-s3` (for example, `*bucket_base_name* -- *usw2-az1* --x-s3` ). If you don't specify a name, AWS CloudFormation generates a unique ID and uses that ID for the bucket name. For information about bucket naming restrictions, see [Directory bucket naming rules](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) in the *Amazon S3 User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you need to replace the resource, specify a new name.", "title": "BucketName", "type": "string" }, @@ -240065,7 +240055,7 @@ "type": "string" }, "KmsKeyArn": { - "markdownDescription": "The customer master key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the default master key or a custom master key that you created in AWS KMS as follows:\n\n- To use the default master key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the default master key in the US West (Oregon) Region, the ARN of the default master key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the default master key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a custom master key that you created in AWS KMS, provide the ARN of the master key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. 
For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify a master key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS master keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", + "markdownDescription": "The customer managed key that Amazon SES should use to encrypt your emails before saving them to the Amazon S3 bucket. You can use the AWS managed key or a customer managed key that you created in AWS KMS as follows:\n\n- To use the AWS managed key, provide an ARN in the form of `arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses` . For example, if your AWS account ID is 123456789012 and you want to use the AWS managed key in the US West (Oregon) Region, the ARN of the AWS managed key would be `arn:aws:kms:us-west-2:123456789012:alias/aws/ses` . If you use the AWS managed key, you don't need to perform any extra steps to give Amazon SES permission to use the key.\n- To use a customer managed key that you created in AWS KMS, provide the ARN of the customer managed key and ensure that you add a statement to your key's policy to give Amazon SES permission to use it. For more information about giving permissions, see the [Amazon SES Developer Guide](https://docs.aws.amazon.com/ses/latest/dg/receiving-email-permissions.html) .\n\nFor more information about key policies, see the [AWS KMS Developer Guide](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) . If you do not specify an AWS KMS key, Amazon SES does not encrypt your emails.\n\n> Your mail is encrypted by Amazon SES using the Amazon S3 encryption client before the mail is submitted to Amazon S3 for storage. It is not encrypted using Amazon S3 server-side encryption. This means that you must use the Amazon S3 encryption client to decrypt the email after retrieving it from Amazon S3, as the service has no access to use your AWS KMS keys for decryption. This encryption client is currently available with the [AWS SDK for Java](https://docs.aws.amazon.com/sdk-for-java/) and [AWS SDK for Ruby](https://docs.aws.amazon.com/sdk-for-ruby/) only. For more information about client-side encryption using AWS KMS managed keys, see the [Amazon S3 Developer Guide](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) .", "title": "KmsKeyArn", "type": "string" }, @@ -240538,12 +240528,12 @@ "additionalProperties": false, "properties": { "ArchivePolicy": { - "markdownDescription": "The archive policy determines the number of days Amazon SNS retains messages. 
You can set a retention period from 1 to 365 days.", + "markdownDescription": "The `ArchivePolicy` determines the number of days Amazon SNS retains messages in FIFO topics. You can set a retention period ranging from 1 to 365 days. This property is only applicable to FIFO topics; attempting to use it with standard topics will result in a creation failure.", "title": "ArchivePolicy", "type": "object" }, "ContentBasedDeduplication": { - "markdownDescription": "Enables content-based deduplication for FIFO topics.\n\n- By default, `ContentBasedDeduplication` is set to `false` . If you create a FIFO topic and this attribute is `false` , you must specify a value for the `MessageDeduplicationId` parameter for the [Publish](https://docs.aws.amazon.com/sns/latest/api/API_Publish.html) action.\n- When you set `ContentBasedDeduplication` to `true` , Amazon SNS uses a SHA-256 hash to generate the `MessageDeduplicationId` using the body of the message (but not the attributes of the message).\n\n(Optional) To override the generated value, you can specify a value for the the `MessageDeduplicationId` parameter for the `Publish` action.", + "markdownDescription": "`ContentBasedDeduplication` enables deduplication of messages based on their content for FIFO topics. By default, this property is set to false. If you create a FIFO topic with `ContentBasedDeduplication` set to false, you must provide a `MessageDeduplicationId` for each `Publish` action. When set to true, Amazon SNS automatically generates a `MessageDeduplicationId` using a SHA-256 hash of the message body (excluding message attributes). You can optionally override this generated value by specifying a `MessageDeduplicationId` in the `Publish` action. Note that this property only applies to FIFO topics; using it with standard topics will cause the creation to fail.", "title": "ContentBasedDeduplication", "type": "boolean" }, @@ -240878,7 +240868,7 @@ "type": "number" }, "FifoQueue": { - "markdownDescription": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .", + "markdownDescription": "If set to true, creates a FIFO queue. If you don't specify this property, Amazon SQS creates a standard queue. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Amazon SQS Developer Guide* .", "title": "FifoQueue", "type": "boolean" }, @@ -240893,7 +240883,7 @@ "type": "number" }, "KmsMasterKeyId": { - "markdownDescription": "The ID of an AWS Key Management Service (KMS) for Amazon SQS , or a custom KMS. To use the AWS managed KMS for Amazon SQS , specify a (default) alias ARN, alias name (e.g. `alias/aws/sqs` ), key ARN, or key ID. 
For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [AWS Key Management Service Best Practices](https://docs.aws.amazon.com/https://d0.awsstatic.com/whitepapers/aws-kms-best-practices.pdf) whitepaper", + "markdownDescription": "The ID of an AWS Key Management Service (KMS) key for Amazon SQS , or a custom KMS key. To use the AWS managed KMS key for Amazon SQS , specify a (default) alias ARN, alias name (for example `alias/aws/sqs` ), key ARN, or key ID. For more information, see the following:\n\n- [Encryption at rest](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html) in the *Amazon SQS Developer Guide*\n- [CreateQueue](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_CreateQueue.html) in the *Amazon SQS API Reference*\n- [Request Parameters](https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) in the *AWS Key Management Service API Reference*\n- The Key Management Service (KMS) section of the [Security best practices for AWS Key Management Service](https://docs.aws.amazon.com/kms/latest/developerguide/best-practices.html) in the *AWS Key Management Service Developer Guide*", "title": "KmsMasterKeyId", "type": "string" }, @@ -240908,7 +240898,7 @@ "type": "number" }, "QueueName": { - "markdownDescription": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the `.fifo` suffix. For more information, see [FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html) in the *Amazon SQS Developer Guide* .\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *AWS CloudFormation User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", + "markdownDescription": "A name for the queue. To create a FIFO queue, the name of your FIFO queue must end with the `.fifo` suffix. For more information, see [Amazon SQS FIFO queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-fifo-queues.html) in the *Amazon SQS Developer Guide* .\n\nIf you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the queue name. For more information, see [Name type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) in the *AWS CloudFormation User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. 
If you must replace the resource, specify a new name.", "title": "QueueName", "type": "string" }, @@ -241463,7 +241453,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://doc-example-bucket/my-folder/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", + "markdownDescription": "The value of a key-value pair that identifies the location of an attachment to a document. The format for *Value* depends on the type of key you specify.\n\n- For the key *SourceUrl* , the value is an S3 bucket location. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix\" ]`\n- For the key *S3FileUrl* , the value is a file in an S3 bucket. For example:\n\n`\"Values\": [ \"s3://amzn-s3-demo-bucket/my-prefix/my-file.py\" ]`\n- For the key *AttachmentReference* , the value is constructed from the name of another SSM document in your account, a version number of that document, and a file attached to that document version that you want to reuse. For example:\n\n`\"Values\": [ \"MyOtherDocument/3/my-other-file.py\" ]`\n\nHowever, if the SSM document is shared with you from another account, the full SSM document ARN must be specified instead of the document name only. For example:\n\n`\"Values\": [ \"arn:aws:ssm:us-east-2:111122223333:document/OtherAccountDocument/3/their-file.py\" ]`", "title": "Values", "type": "array" } @@ -241801,7 +241791,7 @@ "type": "number" }, "ServiceRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. 
If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "title": "ServiceRoleArn", "type": "string" }, @@ -241995,7 +241985,7 @@ "type": "object" }, "ServiceRoleArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks. For more information, see [Setting up maintenance windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the in the *AWS Systems Manager User Guide* .", + "markdownDescription": "The Amazon Resource Name (ARN) of the IAM service role for AWS Systems Manager to assume when running a maintenance window task. If you do not specify a service role ARN, Systems Manager uses a service-linked role in your account. If no appropriate service-linked role for Systems Manager exists in your account, it is created when you run `RegisterTaskWithMaintenanceWindow` .\n\nHowever, for an improved security posture, we strongly recommend creating a custom policy and custom service role for running your maintenance window tasks. The policy can be crafted to provide only the permissions needed for your particular maintenance window tasks.
For more information, see [Setting up Maintenance Windows](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html) in the *AWS Systems Manager User Guide* .", "title": "ServiceRoleArn", "type": "string" }, @@ -242257,7 +242247,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "markdownDescription": "A list of explicitly approved patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", "title": "ApprovedPatches", "type": "array" }, @@ -242281,7 +242271,7 @@ }, "GlobalFilters": { "$ref": "#/definitions/AWS::SSM::PatchBaseline.PatchFilterGroup", - "markdownDescription": "A set of global filters used to include patches in the baseline.", + "markdownDescription": "A set of global filters used to include patches in the baseline.\n\n> The `GlobalFilters` parameter can be configured only by using the AWS CLI or an AWS SDK. It can't be configured from the Patch Manager console, and its value isn't displayed in the console.", "title": "GlobalFilters" }, "Name": { @@ -242306,12 +242296,12 @@ "items": { "type": "string" }, - "markdownDescription": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [About package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", + "markdownDescription": "A list of explicitly rejected patches for the baseline.\n\nFor information about accepted formats for lists of approved patches and rejected patches, see [Package name formats for approved and rejected patch lists](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) in the *AWS Systems Manager User Guide* .", "title": "RejectedPatches", "type": "array" }, "RejectedPatchesAction": { - "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- *`ALLOW_AS_DEPENDENCY`* : A package in the `Rejected` patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `InstalledOther` . This is the default action if no option is specified.\n- *BLOCK* : Packages in the *Rejected patches* list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances.
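As a sketch of the custom-role recommendation above (the role, window, and task names are assumptions, not part of this diff):

# Sketch: a maintenance window task that pins a scoped custom role
# instead of falling back to the Systems Manager service-linked role.
MyWindowTask:
  Type: AWS::SSM::MaintenanceWindowTask
  Properties:
    WindowId: !Ref MyMaintenanceWindow   # assumed to exist elsewhere
    TaskType: RUN_COMMAND
    TaskArn: AWS-RunPatchBaseline
    Priority: 1
    ServiceRoleArn: !GetAtt MaintenanceWindowRole.Arn  # least-privilege custom role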
If a package was installed before it was added to the *Rejected patches* list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as *InstalledRejected* .", + "markdownDescription": "The action for Patch Manager to take on patches included in the `RejectedPackages` list.\n\n- **ALLOW_AS_DEPENDENCY** - *Linux and macOS* : A package in the rejected patches list is installed only if it is a dependency of another package. It is considered compliant with the patch baseline, and its status is reported as `INSTALLED_OTHER` . This is the default action if no option is specified.\n\n*Windows Server* : Windows Server doesn't support the concept of package dependencies. If a package in the rejected patches list is already installed on the node, its status is reported as `INSTALLED_OTHER` . Any package not already installed on the node is skipped. This is the default action if no option is specified.\n- **BLOCK** - *All OSs* : Packages in the rejected patches list, and packages that include them as dependencies, aren't installed by Patch Manager under any circumstances. If a package was installed before it was added to the rejected patches list, or is installed outside of Patch Manager afterward, it's considered noncompliant with the patch baseline and its status is reported as `INSTALLED_REJECTED` .", "title": "RejectedPatchesAction", "type": "string" }, @@ -242419,12 +242409,12 @@ "additionalProperties": false, "properties": { "ApproveAfterDays": { - "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nYou must specify a value for `ApproveAfterDays` .\n\nException: Not supported on Debian Server or Ubuntu Server.", + "markdownDescription": "The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline. For example, a value of `7` means that patches are approved seven days after they are released.\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveAfterDays` or `ApproveUntilDate` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", "title": "ApproveAfterDays", "type": "number" }, "ApproveUntilDate": { - "markdownDescription": "The cutoff date for auto approval of released patches. Any patches released on or before this date are installed automatically. Not supported on Debian Server or Ubuntu Server.\n\nEnter dates in the format `YYYY-MM-DD` .
For example, `2024-12-31` .\n\nThis parameter is marked as `Required: No` , but your request must include a value for either `ApproveUntilDate` or `ApproveAfterDays` .\n\nNot supported for Debian Server or Ubuntu Server.\n\n> Use caution when setting this value for Windows Server patch baselines. Because patch updates that are replaced by later updates are removed, setting too broad a value for this parameter can result in crucial patches not being installed. For more information, see the *Windows Server* tab in the topic [How security patches are selected](https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-selecting-patches.html) in the *AWS Systems Manager User Guide* .", "title": "ApproveUntilDate", "type": "string" }, @@ -248088,7 +248078,7 @@ "type": "string" }, "Environment": { - "markdownDescription": "The environment variables to set in the Docker container.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", + "markdownDescription": "The environment variables to set in the Docker container. Don't include any sensitive data in your environment variables.\n\nThe maximum length of each key and value in the `Environment` map is 1024 bytes. The maximum length of all keys and values in the map, combined, is 32 KB. If you pass multiple containers to a `CreateModel` request, then the maximum length of all of their maps, combined, is also 32 KB.", "title": "Environment", "type": "object" }, @@ -254536,7 +254526,7 @@ "properties": { "HostedRotationLambda": { "$ref": "#/definitions/AWS::SecretsManager::RotationSchedule.HostedRotationLambda", - "markdownDescription": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .", + "markdownDescription": "Creates a new Lambda rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) . To use a rotation function that already exists, specify `RotationLambdaARN` instead.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .", "title": "HostedRotationLambda" }, "RotateImmediatelyOnUpdate": { @@ -254545,7 +254535,7 @@ "type": "boolean" }, "RotationLambdaARN": { - "markdownDescription": "The ARN of an existing Lambda rotation function. 
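The either/or constraint on approval rules described above is easier to see in a template fragment; a hedged sketch with illustrative values:

# Sketch: a patch baseline whose rule auto-approves security patches
# seven days after release. ApproveUntilDate: '2024-12-31' could be
# used instead of ApproveAfterDays, but a rule needs exactly one of them.
MyPatchBaseline:
  Type: AWS::SSM::PatchBaseline
  Properties:
    Name: my-baseline
    OperatingSystem: AMAZON_LINUX_2
    ApprovalRules:
      PatchRules:
        - ApproveAfterDays: 7
          PatchFilterGroup:
            PatchFilters:
              - Key: CLASSIFICATION
                Values: [Security]
    RejectedPatches: [kernel-3.10.*]
    RejectedPatchesAction: BLOCK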
To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", + "markdownDescription": "The ARN of an existing Lambda rotation function. To specify a rotation function that is also defined in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function.\n\nFor Amazon RDS master user credentials, see [AWS::RDS::DBCluster MasterUserSecret](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbcluster-masterusersecret.html) .\n\nFor Amazon Redshift admin user credentials, see [AWS::Redshift::Cluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html) .\n\nTo create a new rotation function based on one of the [Secrets Manager rotation function templates](https://docs.aws.amazon.com/secretsmanager/latest/userguide/reference_available-rotation-templates.html) , specify `HostedRotationLambda` instead.", "title": "RotationLambdaARN", "type": "string" }, @@ -254555,7 +254545,7 @@ "title": "RotationRules" }, "SecretId": { - "markdownDescription": "The ARN or name of the secret to rotate.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", + "markdownDescription": "The ARN or name of the secret to rotate. This is unique for each rotation schedule definition.\n\nTo reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", "title": "SecretId", "type": "string" } @@ -254881,7 +254871,7 @@ "additionalProperties": false, "properties": { "SecretId": { - "markdownDescription": "The ARN or name of the secret. To reference a secret also created in this template, use the see [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID.", + "markdownDescription": "The ARN or name of the secret. To reference a secret also created in this template, use the [Ref](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-ref.html) function with the secret's logical ID. This field is unique for each target attachment definition.", "title": "SecretId", "type": "string" }, @@ -254891,7 +254881,7 @@ "type": "string" }, "TargetType": { - "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database.
This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster", + "markdownDescription": "A string that defines the type of service or database associated with the secret. This value instructs Secrets Manager how to update the secret with the details of the service or database. This value must be one of the following:\n\n- AWS::RDS::DBInstance\n- AWS::RDS::DBCluster\n- AWS::Redshift::Cluster\n- AWS::RedshiftServerless::Namespace\n- AWS::DocDB::DBInstance\n- AWS::DocDB::DBCluster\n- AWS::DocDBElastic::Cluster", "title": "TargetType", "type": "string" } @@ -255040,7 +255030,7 @@ "title": "FindingFieldsUpdate" }, "Type": { - "markdownDescription": "Specifies that the rule action should update the `Types` finding field. The `Types` finding field classifies findings in the format of namespace/category/classifier. For more information, see [Types taxonomy for ASFF](https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-findings-format-type-taxonomy.html) in the *AWS Security Hub User Guide* .", + "markdownDescription": "Specifies the type of action that Security Hub takes when a finding matches the defined criteria of a rule.", "title": "Type", "type": "string" } @@ -255537,7 +255527,7 @@ "type": "string" }, "Normalized": { - "markdownDescription": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and do not provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", + "markdownDescription": "The normalized severity for the finding. This attribute is to be deprecated in favor of `Label` .\n\nIf you provide `Normalized` and don't provide `Label` , `Label` is set automatically as follows.\n\n- 0 - `INFORMATIONAL`\n- 1\u201339 - `LOW`\n- 40\u201369 - `MEDIUM`\n- 70\u201389 - `HIGH`\n- 90\u2013100 - `CRITICAL`", "title": "Normalized", "type": "number" }, @@ -255573,7 +255563,7 @@ "additionalProperties": false, "properties": { "Status": { - "markdownDescription": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed. The finding is no longer updated.", + "markdownDescription": "The status of the investigation into the finding. The workflow status is specific to an individual finding. It does not affect the generation of new findings. 
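The rotation-schedule and target-attachment descriptions above compose roughly as follows; a sketch assuming an RDS MySQL target and logical IDs that are not part of this diff:

# Sketch: attach a secret to a database, then rotate it with a hosted
# rotation function. To reuse an existing function, replace
# HostedRotationLambda with RotationLambdaARN.
MySecretAttachment:
  Type: AWS::SecretsManager::SecretTargetAttachment
  Properties:
    SecretId: !Ref MySecret           # assumed defined elsewhere
    TargetId: !Ref MyDatabase         # assumed AWS::RDS::DBInstance
    TargetType: AWS::RDS::DBInstance  # one of the values listed above
MySecretRotation:
  Type: AWS::SecretsManager::RotationSchedule
  DependsOn: MySecretAttachment
  Properties:
    SecretId: !Ref MySecret
    HostedRotationLambda:
      RotationType: MySQLSingleUser
    RotationRules:
      ScheduleExpression: rate(30 days)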
For example, setting the workflow status to `SUPPRESSED` or `RESOLVED` does not prevent a new finding for the same issue.\n\nThe allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets `WorkFlowStatus` from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- The record state changes from `ARCHIVED` to `ACTIVE` .\n- The compliance status changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that you notified the resource owner about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed. The finding is no longer updated.", "title": "Status", "type": "string" } @@ -256612,7 +256602,7 @@ "items": { "$ref": "#/definitions/AWS::SecurityHub::Insight.StringFilter" }, - "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and do not believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", + "markdownDescription": "The status of the investigation into a finding. Allowed values are the following.\n\n- `NEW` - The initial state of a finding, before it is reviewed.\n\nSecurity Hub also resets the workflow status from `NOTIFIED` or `RESOLVED` to `NEW` in the following cases:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to either `WARNING` , `FAILED` , or `NOT_AVAILABLE` .\n- `NOTIFIED` - Indicates that the resource owner has been notified about the security issue. 
Used when the initial reviewer is not the resource owner, and needs intervention from the resource owner.\n\nIf one of the following occurs, the workflow status is changed automatically from `NOTIFIED` to `NEW` :\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n- `SUPPRESSED` - Indicates that you reviewed the finding and don't believe that any action is needed.\n\nThe workflow status of a `SUPPRESSED` finding does not change if `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `RESOLVED` - The finding was reviewed and remediated and is now considered resolved.\n\nThe finding remains `RESOLVED` unless one of the following occurs:\n\n- `RecordState` changes from `ARCHIVED` to `ACTIVE` .\n- `Compliance.Status` changes from `PASSED` to `FAILED` , `WARNING` , or `NOT_AVAILABLE` .\n\nIn those cases, the workflow status is automatically reset to `NEW` .\n\nFor findings from controls, if `Compliance.Status` is `PASSED` , then Security Hub automatically sets the workflow status to `RESOLVED` .", "title": "WorkflowStatus", "type": "array" } @@ -259838,7 +259828,7 @@ "items": { "$ref": "#/definitions/AWS::ServiceDiscovery::Service.DnsRecord" }, - "markdownDescription": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.", + "markdownDescription": "An array that contains one `DnsRecord` object for each Route\u00a053 DNS record that you want AWS Cloud Map to create when you register an instance.\n\n> The record type of a service can't be updated directly and can only be changed by deleting the service and recreating it with a new `DnsConfig` .", "title": "DnsRecords", "type": "array" }, @@ -261018,7 +261008,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of Amazon CloudWatch alarms to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.", + "markdownDescription": "A list of Amazon CloudWatch alarm names to be monitored during the deployment. The deployment fails and rolls back if any of these alarms go into the `ALARM` state.\n\n> Amazon CloudWatch considers nonexistent alarms to have an `OK` state. If you provide an invalid alarm name or provide the ARN of an alarm instead of its name, your deployment may not roll back correctly.", "title": "Alarms", "type": "array" }, diff --git a/schema_source/sam.schema.json b/schema_source/sam.schema.json index 1a897cdfb..14954abfb 100644 --- a/schema_source/sam.schema.json +++ b/schema_source/sam.schema.json @@ -5525,6 +5525,9 @@ "markdownDescription": "Create a snapshot of any new Lambda function version\\. A snapshot is a cached state of your initialized function, including all of its dependencies\\. The function is initialized just once and the cached state is reused for all future invocations, improving application performance by reducing the number of times your function must be initialized\\. To learn more, see [Improving startup performance with Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) in the *AWS Lambda Developer Guide*\\. 
\n*Type*: [SnapStart](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) \n*Required*: No \n*AWS CloudFormation compatibility*: This property is passed directly to the [`SnapStart`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) property of an `AWS::Lambda::Function` resource\\.", "title": "SnapStart" }, + "SourceKMSKeyArn": { + "$ref": "#/definitions/PassThroughProp" + }, "Tags": { "markdownDescription": "A map \\(string to string\\) that specifies the tags added to this function\\. For details about valid keys and values for tags, see [Tag Key and Value Requirements](https://docs.aws.amazon.com/lambda/latest/dg/configuration-tags.html#configuration-tags-restrictions) in the *AWS Lambda Developer Guide*\\. \nWhen the stack is created, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\. \n*Type*: Map \n*Required*: No \n*AWS CloudFormation compatibility*: This property is similar to the [`Tags`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-tags) property of an `AWS::Lambda::Function` resource\\. The `Tags` property in AWS SAM consists of key\\-value pairs \\(whereas in AWS CloudFormation this property consists of a list of `Tag` objects\\)\\. Also, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\.", "title": "Tags", @@ -6110,6 +6113,9 @@ "markdownDescription": "Create a snapshot of any new Lambda function version\\. A snapshot is a cached state of your initialized function, including all of its dependencies\\. The function is initialized just once and the cached state is reused for all future invocations, improving application performance by reducing the number of times your function must be initialized\\. To learn more, see [Improving startup performance with Lambda SnapStart](https://docs.aws.amazon.com/lambda/latest/dg/snapstart.html) in the *AWS Lambda Developer Guide*\\. \n*Type*: [SnapStart](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) \n*Required*: No \n*AWS CloudFormation compatibility*: This property is passed directly to the [`SnapStart`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-lambda-function-snapstart.html) property of an `AWS::Lambda::Function` resource\\.", "title": "SnapStart" }, + "SourceKMSKeyArn": { + "$ref": "#/definitions/PassThroughProp" + }, "Tags": { "markdownDescription": "A map \\(string to string\\) that specifies the tags added to this function\\. For details about valid keys and values for tags, see [Tag Key and Value Requirements](https://docs.aws.amazon.com/lambda/latest/dg/configuration-tags.html#configuration-tags-restrictions) in the *AWS Lambda Developer Guide*\\. \nWhen the stack is created, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\. \n*Type*: Map \n*Required*: No \n*AWS CloudFormation compatibility*: This property is similar to the [`Tags`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-function.html#cfn-lambda-function-tags) property of an `AWS::Lambda::Function` resource\\. 
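The `SourceKMSKeyArn` entry just added above is a plain pass-through: SAM forwards it into `Code.SourceKMSKeyArn` of the generated `AWS::Lambda::Function`, as the transform-test output later in this diff confirms. A minimal sketch reusing the ARN from the new test input:

# Sketch: SAM input; the transform emits Code.SourceKMSKeyArn on the
# underlying Lambda function. The property can also sit under
# Globals.Function, with a function-level value taking precedence.
MyFunction:
  Type: AWS::Serverless::Function
  Properties:
    CodeUri: s3://sam-demo-bucket/hello.zip
    Handler: hello.handler
    Runtime: python3.11
    SourceKMSKeyArn: arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f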
The `Tags` property in AWS SAM consists of key\\-value pairs \\(whereas in AWS CloudFormation this property consists of a list of `Tag` objects\\)\\. Also, AWS SAM automatically adds a `lambda:createdBy:SAM` tag to this Lambda function, and to the default roles that are generated for this function\\.", "title": "Tags", @@ -8044,6 +8050,10 @@ ], "markdownDescription": "The type of the state machine\\. \n*Valid values*: `STANDARD` or `EXPRESS` \n*Type*: String \n*Required*: No \n*Default*: `STANDARD` \n*AWS CloudFormation compatibility*: This property is passed directly to the [`StateMachineType`](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html#cfn-stepfunctions-statemachine-statemachinetype) property of an `AWS::StepFunctions::StateMachine` resource\\.", "title": "Type" + }, + "UseAliasAsEventTarget": { + "title": "Usealiasaseventtarget", + "type": "boolean" } }, "title": "Properties", diff --git a/tests/model/stepfunctions/test_state_machine_generator.py b/tests/model/stepfunctions/test_state_machine_generator.py index a4f801c81..968708152 100644 --- a/tests/model/stepfunctions/test_state_machine_generator.py +++ b/tests/model/stepfunctions/test_state_machine_generator.py @@ -148,6 +148,37 @@ def test_state_machine_with_unsupported_event_source(self): with self.assertRaises(InvalidEventException): StateMachineGenerator(**self.kwargs).to_cloudformation() + def test_state_machine_with_alias_as_event_source_target(self): + self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" + self.kwargs["role"] = "my-test-role-arn" + self.kwargs["use_alias_as_event_target"] = True + self.kwargs["auto_publish_alias"] = "live" + event_resolver = Mock() + event_resolver.resolve_resource_type = Mock(return_value=CloudWatchEvent) + self.kwargs["event_resolver"] = event_resolver + self.kwargs["events"] = { + "CWEEvent": {"Type": "CloudWatchEvent", "Properties": {"Pattern": {"detail": {"state": ["terminated"]}}}} + } + self.kwargs["event_resources"] = {"CWEEvent": {}} + state_machine_generator = StateMachineGenerator(**self.kwargs) + state_machine_generator._generate_managed_traffic_shifting_resources() + generated_event_resources = state_machine_generator._generate_event_resources() + self.assertEqual(generated_event_resources[0].Targets[0]["Arn"], {"Ref": "StateMachineIdAliaslive"}) + + def test_state_machine_with_alias_as_event_source_target_requires_alias(self): + self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" + self.kwargs["role"] = "my-test-role-arn" + self.kwargs["use_alias_as_event_target"] = True + self.kwargs["deployment_preference"] = {"Type": "ALL_AT_ONCE"} + # Missing property + # self.kwargs["auto_publish_alias"] = "live" + with self.assertRaises(InvalidResourceException) as error: + StateMachineGenerator(**self.kwargs).to_cloudformation() + self.assertEqual( + error.exception.message, + "Resource with id [StateMachineId] is invalid. 
'UseAliasAsEventTarget' requires 'AutoPublishAlias' property to be specified.", + ) + def test_state_machine_with_managed_traffic_shifting_properties(self): self.kwargs["definition_uri"] = "s3://mybucket/myASLfile" self.kwargs["role"] = "my-test-role-arn" diff --git a/tests/translator/input/all_policy_templates.yaml b/tests/translator/input/all_policy_templates.yaml index 0bcd0de51..c2666a9c5 100644 --- a/tests/translator/input/all_policy_templates.yaml +++ b/tests/translator/input/all_policy_templates.yaml @@ -170,7 +170,7 @@ Resources: EventBusName: name - AcmGetCertificatePolicy: - CertificateArn: arn + CertificateArn: arn:aws:acm:us-west-2:987654321098:certificate/dec86919-7219-4e8d-8871-7f1609df2c7f - Route53ChangeResourceRecordSetsPolicy: HostedZoneId: test diff --git a/tests/translator/input/api_http_with_default_iam_authorizer.yaml b/tests/translator/input/api_http_with_default_iam_authorizer.yaml index a2bfabbb2..4d7d96107 100644 --- a/tests/translator/input/api_http_with_default_iam_authorizer.yaml +++ b/tests/translator/input/api_http_with_default_iam_authorizer.yaml @@ -4,7 +4,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: app.lambda_handler - Runtime: python3.8 + Runtime: python3.11 Role: Fn::GetAtt: - HelloWorldFunctionRole diff --git a/tests/translator/input/api_merge_definitions_with_any_method.yaml b/tests/translator/input/api_merge_definitions_with_any_method.yaml index 0b50b1aed..3e689f453 100644 --- a/tests/translator/input/api_merge_definitions_with_any_method.yaml +++ b/tests/translator/input/api_merge_definitions_with_any_method.yaml @@ -15,7 +15,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: code/handler - Runtime: python3.8 + Runtime: python3.11 Events: AllEvent: Type: Api diff --git a/tests/translator/input/api_with_merge_definitions_null_paths.yaml b/tests/translator/input/api_with_merge_definitions_null_paths.yaml index 797eac94c..4c3b19f65 100644 --- a/tests/translator/input/api_with_merge_definitions_null_paths.yaml +++ b/tests/translator/input/api_with_merge_definitions_null_paths.yaml @@ -16,7 +16,7 @@ Resources: Properties: CodeUri: s3://bucket/key Handler: code/handler - Runtime: python3.8 + Runtime: python3.11 Events: AllEvent: Type: Api diff --git a/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml b/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml index 60056fcbc..9ddac00bb 100644 --- a/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml +++ b/tests/translator/input/cognito_user_pool_with_new_property_and_cognito_event.yaml @@ -8,7 +8,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 InlineCode: foo Handler: bar Events: diff --git a/tests/translator/input/connector_with_non_id_source_and_destination.yaml b/tests/translator/input/connector_with_non_id_source_and_destination.yaml index 10a0fcf10..c91311b9d 100644 --- a/tests/translator/input/connector_with_non_id_source_and_destination.yaml +++ b/tests/translator/input/connector_with_non_id_source_and_destination.yaml @@ -11,7 +11,7 @@ Resources: Principal: Service: lambda.amazonaws.com ManagedPolicyArns: - - arn:{AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole + - !Sub arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole SamFunction: Type: AWS::Serverless::Function diff --git a/tests/translator/input/embedded_connectors_hardcoded_props.yaml 
b/tests/translator/input/embedded_connectors_hardcoded_props.yaml index 5c84285d9..da59bf5eb 100644 --- a/tests/translator/input/embedded_connectors_hardcoded_props.yaml +++ b/tests/translator/input/embedded_connectors_hardcoded_props.yaml @@ -2,6 +2,15 @@ Transform: AWS::Serverless-2016-10-31 Resources: MyFunction: Type: AWS::Lambda::Function + Properties: + Handler: index.handler + Role: arn:aws:iam::111122223333:role/lambda-role + Runtime: nodejs20.x + Code: + ZipFile: | + exports.handler = function(event, context, callback) { + return callback(null, 'success'); + } MyRule: Type: AWS::Events::Rule @@ -13,6 +22,8 @@ Resources: Arn: !Ref MyTopic Permissions: - Write + Properties: + ScheduleExpression: rate(5 minutes) MyQueue: Type: AWS::SQS::Queue @@ -42,6 +53,8 @@ Resources: Id: MyFunction Permissions: - Write + Properties: + Name: MyApiV1 MyApiV2: Type: AWS::ApiGatewayV2::Api @@ -54,6 +67,8 @@ Resources: Id: MyFunction Permissions: - Write + Properties: + Name: MyApiV2 MySNSTopic: Type: AWS::SNS::Topic diff --git a/tests/translator/input/error_invalid_httpapi_cors_property.yaml b/tests/translator/input/error_invalid_httpapi_cors_property.yaml new file mode 100644 index 000000000..2dac2029c --- /dev/null +++ b/tests/translator/input/error_invalid_httpapi_cors_property.yaml @@ -0,0 +1,22 @@ +Resources: + HttpApi: + Type: AWS::Serverless::HttpApi + Properties: + StageName: stagename + DefaultRouteSettings: + ThrottlingBurstLimit: 200 + RouteSettings: + GET /path: + ThrottlingBurstLimit: 500 # overridden in HttpApi Event + StageVariables: + StageVar: Value + FailOnWarnings: true + CorsConfiguration: + AllowOrigin: + - https://example.com + AllowHeaders: + - x-apigateway-header + AllowMethods: + - GET + MaxAge: 600 + AllowCredentials: true diff --git a/tests/translator/input/error_state_machine_with_api_intrinsics.yaml b/tests/translator/input/error_state_machine_with_api_intrinsics.yaml index 5e45422de..e6739d6a9 100644 --- a/tests/translator/input/error_state_machine_with_api_intrinsics.yaml +++ b/tests/translator/input/error_state_machine_with_api_intrinsics.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/function_with_alias_and_all_properties_property.yaml b/tests/translator/input/function_with_alias_and_all_properties_property.yaml index dcd9b3e27..0154aa603 100644 --- a/tests/translator/input/function_with_alias_and_all_properties_property.yaml +++ b/tests/translator/input/function_with_alias_and_all_properties_property.yaml @@ -42,7 +42,7 @@ Resources: - x86_64 EphemeralStorage: Size: 1024 - Role: !Sub arn:${AWS::Partition}:iam::role + Role: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1 KmsKeyArn: !Sub arn:${AWS::Partition}:key:key FileSystemConfigs: - Arn: !GetAtt AccessPoint.Arn @@ -72,7 +72,7 @@ Resources: - x86_64 EphemeralStorage: Size: 1024 - Role: !Sub arn:${AWS::Partition}:iam::role + Role: !Sub arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1 KmsKeyArn: !Sub arn:${AWS::Partition}:key:key FileSystemConfigs: - Arn: !GetAtt AccessPoint.Arn diff --git a/tests/translator/input/function_with_cw_event.yaml b/tests/translator/input/function_with_cw_event.yaml index 4e550988b..5f62cefc1 100644 --- a/tests/translator/input/function_with_cw_event.yaml +++ b/tests/translator/input/function_with_cw_event.yaml @@ -3,7 +3,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - 
Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Events: diff --git a/tests/translator/input/function_with_ignore_globals.yaml b/tests/translator/input/function_with_ignore_globals.yaml index e16fcb91b..a36c02014 100644 --- a/tests/translator/input/function_with_ignore_globals.yaml +++ b/tests/translator/input/function_with_ignore_globals.yaml @@ -1,6 +1,6 @@ Globals: Function: - Runtime: python3.8 + Runtime: python3.11 Handler: index.lambda_handler MemorySize: 128 diff --git a/tests/translator/input/function_with_null_events.yaml b/tests/translator/input/function_with_null_events.yaml index 5dc591caa..fd9ac2447 100644 --- a/tests/translator/input/function_with_null_events.yaml +++ b/tests/translator/input/function_with_null_events.yaml @@ -4,5 +4,5 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/queues.zip Handler: handlers.handler - Runtime: python3.8 + Runtime: python3.11 Events: diff --git a/tests/translator/input/function_with_runtime_config.yaml b/tests/translator/input/function_with_runtime_config.yaml index a9302aa10..1c5d6e565 100644 --- a/tests/translator/input/function_with_runtime_config.yaml +++ b/tests/translator/input/function_with_runtime_config.yaml @@ -12,7 +12,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: Auto MinimalFunctionWithManualRuntimeManagementConfig: @@ -20,16 +20,16 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: Manual - RuntimeVersionArn: !Sub arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505 + RuntimeVersionArn: !Sub arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505 FunctionWithRuntimeManagementConfigAndAlias: Type: AWS::Serverless::Function Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 AutoPublishAlias: live RuntimeManagementConfig: UpdateRuntimeOn: Auto @@ -38,7 +38,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: !Ref RuntimeUpdateParam FunctionWithIntrinsicRuntimeVersion: @@ -46,7 +46,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/hello.zip Handler: hello.handler - Runtime: python3.8 + Runtime: python3.11 RuntimeManagementConfig: UpdateRuntimeOn: !Ref RuntimeUpdateParam RuntimeVersionArn: !Ref RuntimeVersionParam diff --git a/tests/translator/input/function_with_sns_event_source_all_parameters.yaml b/tests/translator/input/function_with_sns_event_source_all_parameters.yaml index d4a0c767a..18be33cec 100644 --- a/tests/translator/input/function_with_sns_event_source_all_parameters.yaml +++ b/tests/translator/input/function_with_sns_event_source_all_parameters.yaml @@ -9,7 +9,7 @@ Resources: NotificationTopic: Type: SNS Properties: - Topic: topicArn-letsAddMoreSymbols + Topic: arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f Region: region FilterPolicy: store: diff --git a/tests/translator/input/function_with_sourcekmskeyarn.yaml b/tests/translator/input/function_with_sourcekmskeyarn.yaml new file mode 100644 index 000000000..a666448f6 --- /dev/null +++ b/tests/translator/input/function_with_sourcekmskeyarn.yaml @@ 
-0,0 +1,21 @@ +Parameters: + SourceKMSKeyArnParam: + Type: String + Default: arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f + +Resources: + SourceKMSKeyArnFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python3.9 + SourceKMSKeyArn: arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f + + SourceKMSKeyArnParameterFunction: + Type: AWS::Serverless::Function + Properties: + CodeUri: s3://sam-demo-bucket/hello.zip + Handler: hello.handler + Runtime: python3.9 + SourceKMSKeyArn: !Ref SourceKMSKeyArnParam diff --git a/tests/translator/input/globals_for_function.yaml b/tests/translator/input/globals_for_function.yaml index 464a73204..a4cf09729 100644 --- a/tests/translator/input/globals_for_function.yaml +++ b/tests/translator/input/globals_for_function.yaml @@ -33,6 +33,7 @@ Globals: LoggingConfig: LogGroup: myJsonStructuredLogs RecursiveLoop: ALLOW + SourceKMSKeyArn: arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f @@ -67,3 +68,4 @@ Resources: RuntimeManagementConfig: UpdateRuntimeOn: FunctionChange RecursiveLoop: TERMINATE + SourceKMSKeyArn: arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f diff --git a/tests/translator/input/graphqlapi_cognito_default_auth.yaml b/tests/translator/input/graphqlapi_cognito_default_auth.yaml index 7754ffa96..9078f87f7 100644 --- a/tests/translator/input/graphqlapi_cognito_default_auth.yaml +++ b/tests/translator/input/graphqlapi_cognito_default_auth.yaml @@ -16,7 +16,7 @@ Resources: AppIdClientRegex: myregex AwsRegion: na-east-1 # This default action will exist post transform since this is our default authentication. - DefaultAction: something + DefaultAction: ALLOW UserPoolId: myid Tags: key1: value1 diff --git a/tests/translator/input/graphqlapi_ddb_datasource_connector.yaml b/tests/translator/input/graphqlapi_ddb_datasource_connector.yaml index 1f6567fc1..188845c0f 100644 --- a/tests/translator/input/graphqlapi_ddb_datasource_connector.yaml +++ b/tests/translator/input/graphqlapi_ddb_datasource_connector.yaml @@ -18,7 +18,7 @@ Resources: DynamoDb: MyDataSource: TableName: some-table - TableArn: big-arn + TableArn: arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f AnotherDataSource: TableName: cool-table - TableArn: table-arn + TableArn: arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f diff --git a/tests/translator/input/graphqlapi_function_by_id.yaml b/tests/translator/input/graphqlapi_function_by_id.yaml index 6c45cc637..d089338e6 100644 --- a/tests/translator/input/graphqlapi_function_by_id.yaml +++ b/tests/translator/input/graphqlapi_function_by_id.yaml @@ -22,5 +22,5 @@ Resources: DataSourceName: some-cool-datasource Name: MyFunction Runtime: - Name: some-runtime + Name: APPSYNC_JS RuntimeVersion: 1.2.3 diff --git a/tests/translator/input/graphqlapi_lambda_datasource_connector.yaml b/tests/translator/input/graphqlapi_lambda_datasource_connector.yaml index 0493731a2..e6525e4ce 100644 --- a/tests/translator/input/graphqlapi_lambda_datasource_connector.yaml +++ b/tests/translator/input/graphqlapi_lambda_datasource_connector.yaml @@ -17,4 +17,4 @@ Resources: DataSources: Lambda: MyDataSource: - FunctionArn: blah + FunctionArn: arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f diff --git a/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml 
b/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml index f71751ecc..a711ffd00 100644 --- a/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml +++ b/tests/translator/input/graphqlapi_multiple_none_datasource_functions.yaml @@ -15,23 +15,23 @@ Resources: CodeUri: my-code-uri DataSource: NONE Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 AnotherFunction: CodeUri: my-code-uri DataSource: None Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 SimilarFunction: CodeUri: my-code-uri DataSource: none Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 GoodFunction: CodeUri: my-code-uri DataSource: nOnE Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 diff --git a/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml b/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml index eff5425cc..98a77993e 100644 --- a/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml +++ b/tests/translator/input/graphqlapi_resolver_function_with_lambda_datasource.yaml @@ -50,7 +50,7 @@ Resources: Functions: MyFunction: Runtime: - Name: some-runtime + Name: APPSYNC_JS Version: 1.2.3 InlineCode: this is my epic code DataSource: MyDataSource diff --git a/tests/translator/input/inline_precedence.yaml b/tests/translator/input/inline_precedence.yaml index 6b96bab62..51bef8b22 100644 --- a/tests/translator/input/inline_precedence.yaml +++ b/tests/translator/input/inline_precedence.yaml @@ -7,4 +7,4 @@ Resources: pass" CodeUri: . Handler: index.lambda_handler - Runtime: python3.8 + Runtime: python3.11 diff --git a/tests/translator/input/managed_policies_everything.yaml b/tests/translator/input/managed_policies_everything.yaml index 80b724ff1..0a5592414 100644 --- a/tests/translator/input/managed_policies_everything.yaml +++ b/tests/translator/input/managed_policies_everything.yaml @@ -13,7 +13,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Policies: diff --git a/tests/translator/input/managed_policies_minimal.yaml b/tests/translator/input/managed_policies_minimal.yaml index 8f82c6cc2..5e2d40bca 100644 --- a/tests/translator/input/managed_policies_minimal.yaml +++ b/tests/translator/input/managed_policies_minimal.yaml @@ -2,7 +2,7 @@ Resources: MyFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Policies: diff --git a/tests/translator/input/schema_validation_4.yaml b/tests/translator/input/schema_validation_4.yaml index d790743dc..41070daeb 100644 --- a/tests/translator/input/schema_validation_4.yaml +++ b/tests/translator/input/schema_validation_4.yaml @@ -59,7 +59,7 @@ Resources: OtherFunction: Type: AWS::Serverless::Function Properties: - Runtime: python3.8 + Runtime: python3.11 Handler: foo InlineCode: bar Environment: diff --git a/tests/translator/input/state_machine_with_api.yaml b/tests/translator/input/state_machine_with_api.yaml index 086995574..df4001847 100644 --- a/tests/translator/input/state_machine_with_api.yaml +++ b/tests/translator/input/state_machine_with_api.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/state_machine_with_api_single.yaml 
b/tests/translator/input/state_machine_with_api_single.yaml index 0fe28583e..3720506d8 100644 --- a/tests/translator/input/state_machine_with_api_single.yaml +++ b/tests/translator/input/state_machine_with_api_single.yaml @@ -12,7 +12,7 @@ Resources: print(event) return "do nothing" Handler: index.handler - Runtime: python3.8 + Runtime: python3.11 Post: Type: AWS::Serverless::StateMachine Properties: diff --git a/tests/translator/input/state_machine_with_events_and_alias.yaml b/tests/translator/input/state_machine_with_events_and_alias.yaml new file mode 100644 index 000000000..1b954c122 --- /dev/null +++ b/tests/translator/input/state_machine_with_events_and_alias.yaml @@ -0,0 +1,45 @@ +Transform: AWS::Serverless-2016-10-31 +Resources: + MyStateMachine: + Type: AWS::Serverless::StateMachine + Properties: + Type: STANDARD + Definition: + StartAt: HelloWorld + States: + HelloWorld: + Type: Pass + Result: 1 + End: true + Role: !Sub "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + AutoPublishAlias: test + UseAliasAsEventTarget: true + Events: + CWEvent: + Type: CloudWatchEvent + Properties: + Pattern: + detail: + state: + - terminated + EBEvent: + Type: EventBridgeRule + Properties: + Pattern: + source: [aws.tag] + ApiEvent: + Type: Api + Properties: + Path: /path + Method: get + CWSchedule: + Type: Schedule + Properties: + Schedule: rate(1 minute) + Name: TestSchedule + Description: test schedule + Enabled: false + ScheduleEvent: + Type: ScheduleV2 + Properties: + ScheduleExpression: rate(1 minute) diff --git a/tests/translator/input/state_machine_with_sam_policy_templates.yaml b/tests/translator/input/state_machine_with_sam_policy_templates.yaml index 388ddea30..9671ace84 100644 --- a/tests/translator/input/state_machine_with_sam_policy_templates.yaml +++ b/tests/translator/input/state_machine_with_sam_policy_templates.yaml @@ -14,7 +14,7 @@ Resources: Properties: CodeUri: s3://sam-demo-bucket/resolver.zip Handler: resolver.handler - Runtime: python3.8 + Runtime: python3.11 NestedWorkflow: Type: AWS::Serverless::StateMachine diff --git a/tests/translator/output/all_policy_templates.json b/tests/translator/output/all_policy_templates.json index 6c9ee828b..07507ade0 100644 --- a/tests/translator/output/all_policy_templates.json +++ b/tests/translator/output/all_policy_templates.json @@ -1580,7 +1580,7 @@ "Fn::Sub": [ "${certificateArn}", { - "certificateArn": "arn" + "certificateArn": "arn:aws:acm:us-west-2:987654321098:certificate/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/api_http_with_default_iam_authorizer.json b/tests/translator/output/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/api_merge_definitions_with_any_method.json b/tests/translator/output/api_merge_definitions_with_any_method.json index 6d8a0ea0e..c6b65e2bb 100644 --- a/tests/translator/output/api_merge_definitions_with_any_method.json +++ b/tests/translator/output/api_merge_definitions_with_any_method.json @@ -46,7 +46,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/api_with_merge_definitions_null_paths.json 
b/tests/translator/output/api_with_merge_definitions_null_paths.json index 3cddd4eef..91b39e6e8 100644 --- a/tests/translator/output/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/api_with_merge_definitions_null_paths.json @@ -47,7 +47,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/all_policy_templates.json b/tests/translator/output/aws-cn/all_policy_templates.json index fcda9713e..8a915b89a 100644 --- a/tests/translator/output/aws-cn/all_policy_templates.json +++ b/tests/translator/output/aws-cn/all_policy_templates.json @@ -1580,7 +1580,7 @@ "Fn::Sub": [ "${certificateArn}", { - "certificateArn": "arn" + "certificateArn": "arn:aws:acm:us-west-2:987654321098:certificate/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json b/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/aws-cn/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json b/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json index de73f4dc3..c31894edc 100644 --- a/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json +++ b/tests/translator/output/aws-cn/api_merge_definitions_with_any_method.json @@ -54,7 +54,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json b/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json index 0a754aa07..1720309cf 100644 --- a/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/aws-cn/api_with_merge_definitions_null_paths.json @@ -55,7 +55,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json index 94cc8b3f4..fadd1cc52 100644 --- a/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/aws-cn/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/connector_with_non_id_source_and_destination.json b/tests/translator/output/aws-cn/connector_with_non_id_source_and_destination.json index 26dadfc85..c730b0470 100644 --- a/tests/translator/output/aws-cn/connector_with_non_id_source_and_destination.json +++ b/tests/translator/output/aws-cn/connector_with_non_id_source_and_destination.json @@ -183,7 +183,9 @@ ] }, "ManagedPolicyArns": [ - "arn:{AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + } ] }, "Type": "AWS::IAM::Role" diff --git 
a/tests/translator/output/aws-cn/embedded_connectors_hardcoded_props.json b/tests/translator/output/aws-cn/embedded_connectors_hardcoded_props.json index d767d7095..aef58c6c9 100644 --- a/tests/translator/output/aws-cn/embedded_connectors_hardcoded_props.json +++ b/tests/translator/output/aws-cn/embedded_connectors_hardcoded_props.json @@ -1,6 +1,9 @@ { "Resources": { "MyApiV1": { + "Properties": { + "Name": "MyApiV1" + }, "Type": "AWS::ApiGateway::RestApi" }, "MyApiV1ApiV1ToLambdaWriteLambdaPermission": { @@ -40,6 +43,9 @@ "Type": "AWS::Lambda::Permission" }, "MyApiV2": { + "Properties": { + "Name": "MyApiV2" + }, "Type": "AWS::ApiGatewayV2::Api" }, "MyApiV2ApiV2ToLambdaWriteLambdaPermission": { @@ -79,6 +85,14 @@ "Type": "AWS::Lambda::Permission" }, "MyFunction": { + "Properties": { + "Code": { + "ZipFile": "exports.handler = function(event, context, callback) {\n return callback(null, 'success');\n}\n" + }, + "Handler": "index.handler", + "Role": "arn:aws:iam::111122223333:role/lambda-role", + "Runtime": "nodejs20.x" + }, "Type": "AWS::Lambda::Function" }, "MyQueue": { @@ -88,6 +102,9 @@ "Type": "AWS::SQS::Queue" }, "MyRule": { + "Properties": { + "ScheduleExpression": "rate(5 minutes)" + }, "Type": "AWS::Events::Rule" }, "MyRuleRuleToTopicTopicPolicy": { diff --git a/tests/translator/output/aws-cn/function_with_alias_and_all_properties_property.json b/tests/translator/output/aws-cn/function_with_alias_and_all_properties_property.json index 1741d9358..e1666ffa4 100644 --- a/tests/translator/output/aws-cn/function_with_alias_and_all_properties_property.json +++ b/tests/translator/output/aws-cn/function_with_alias_and_all_properties_property.json @@ -46,7 +46,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ @@ -74,7 +74,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "HashChangeFunctionVersion17ca83d2bf", + "HashChangeFunctionVersiona1a9d4d1e4", "Version" ] }, @@ -82,7 +82,7 @@ }, "Type": "AWS::Lambda::Alias" }, - "HashChangeFunctionVersion17ca83d2bf": { + "HashChangeFunctionVersiona1a9d4d1e4": { "DeletionPolicy": "Retain", "Properties": { "Description": "sam-testing", @@ -127,7 +127,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ diff --git a/tests/translator/output/aws-cn/function_with_cw_event.json b/tests/translator/output/aws-cn/function_with_cw_event.json index 4677081d1..fe9f87db8 100644 --- a/tests/translator/output/aws-cn/function_with_cw_event.json +++ b/tests/translator/output/aws-cn/function_with_cw_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_ignore_globals.json b/tests/translator/output/aws-cn/function_with_ignore_globals.json index 6b6e7ecea..df3f1b073 100644 --- a/tests/translator/output/aws-cn/function_with_ignore_globals.json +++ b/tests/translator/output/aws-cn/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_null_events.json b/tests/translator/output/aws-cn/function_with_null_events.json index 
5c3342984..28bce77ae 100644 --- a/tests/translator/output/aws-cn/function_with_null_events.json +++ b/tests/translator/output/aws-cn/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/function_with_runtime_config.json b/tests/translator/output/aws-cn/function_with_runtime_config.json index 3c580a92c..46b43274c 100644 --- a/tests/translator/output/aws-cn/function_with_runtime_config.json +++ b/tests/translator/output/aws-cn/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/aws-cn/function_with_sns_event_source_all_parameters.json b/tests/translator/output/aws-cn/function_with_sns_event_source_all_parameters.json index 4245545a5..b6dc7c98c 100644 --- a/tests/translator/output/aws-cn/function_with_sns_event_source_all_parameters.json +++ b/tests/translator/output/aws-cn/function_with_sns_event_source_all_parameters.json @@ -62,7 +62,7 @@ "FilterPolicyScope": "MessageAttributes", "Protocol": "lambda", "Region": "region", - "TopicArn": "topicArn-letsAddMoreSymbols" + "TopicArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::SNS::Subscription" }, @@ -73,7 +73,7 @@ "Ref": "MyAwesomeFunction" }, "Principal": "sns.amazonaws.com", - "SourceArn": "topicArn-letsAddMoreSymbols" + "SourceArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::Lambda::Permission" }, diff --git a/tests/translator/output/aws-cn/function_with_sourcekmskeyarn.json b/tests/translator/output/aws-cn/function_with_sourcekmskeyarn.json new file mode 100644 index 000000000..2bb689221 --- /dev/null +++ b/tests/translator/output/aws-cn/function_with_sourcekmskeyarn.json @@ -0,0 +1,120 @@ +{ + "Parameters": { + "SourceKMSKeyArnParam": { + "Default": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f", + "Type": "String" + } + }, + "Resources": { + "SourceKMSKeyArnFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + 
"SourceKMSKeyArnFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-cn:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SourceKMSKeyArnParameterFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": { + "Ref": "SourceKMSKeyArnParam" + } + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnParameterFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "SourceKMSKeyArnParameterFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-cn:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + } + } +} diff --git a/tests/translator/output/aws-cn/globals_for_function.json b/tests/translator/output/aws-cn/globals_for_function.json index 39b356f1b..293cf5109 100644 --- a/tests/translator/output/aws-cn/globals_for_function.json +++ b/tests/translator/output/aws-cn/globals_for_function.json @@ -7,7 +7,8 @@ ], "Code": { "S3Bucket": "sam-demo-bucket", - "S3Key": "hello.zip" + "S3Key": "hello.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Environment": { "Variables": { @@ -84,7 +85,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "FunctionWithOverridesVersion096ed3b52b", + "FunctionWithOverridesVersionb52716e99f", "Version" ] }, @@ -133,7 +134,7 @@ }, "Type": "AWS::IAM::Role" }, - "FunctionWithOverridesVersion096ed3b52b": { + "FunctionWithOverridesVersionb52716e99f": { "DeletionPolicy": "Retain", "Properties": { "FunctionName": { @@ -149,7 +150,8 @@ ], "Code": { "S3Bucket": "global-bucket", - "S3Key": "global.zip" + "S3Key": "global.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Environment": { "Variables": { @@ -217,7 +219,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "MinimalFunctionVersione7c6f56e4d", + "MinimalFunctionVersion5244f38b49", "Version" ] }, @@ -262,7 +264,7 @@ }, "Type": "AWS::IAM::Role" }, - "MinimalFunctionVersione7c6f56e4d": { + "MinimalFunctionVersion5244f38b49": { "DeletionPolicy": "Retain", "Properties": { "FunctionName": { diff --git a/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json b/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json index e32b49b83..391715a04 100644 --- a/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json +++ b/tests/translator/output/aws-cn/graphqlapi_cognito_default_auth.json @@ -30,7 +30,7 @@ "UserPoolConfig": { "AppIdClientRegex": "myregex", "AwsRegion": "na-east-1", - "DefaultAction": "something", + "DefaultAction": "ALLOW", "UserPoolId": "myid" }, "XrayEnabled": true diff --git a/tests/translator/output/aws-cn/graphqlapi_ddb_datasource_connector.json 
b/tests/translator/output/aws-cn/graphqlapi_ddb_datasource_connector.json index 6f452be90..d7ad26baf 100644 --- a/tests/translator/output/aws-cn/graphqlapi_ddb_datasource_connector.json +++ b/tests/translator/output/aws-cn/graphqlapi_ddb_datasource_connector.json @@ -104,12 +104,12 @@ ], "Effect": "Allow", "Resource": [ - "table-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "table-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -127,12 +127,12 @@ ], "Effect": "Allow", "Resource": [ - "table-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "table-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -248,12 +248,12 @@ ], "Effect": "Allow", "Resource": [ - "big-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "big-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -271,12 +271,12 @@ ], "Effect": "Allow", "Resource": [ - "big-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "big-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-cn/graphqlapi_function_by_id.json b/tests/translator/output/aws-cn/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- a/tests/translator/output/aws-cn/graphqlapi_function_by_id.json +++ b/tests/translator/output/aws-cn/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-cn/graphqlapi_lambda_datasource_connector.json b/tests/translator/output/aws-cn/graphqlapi_lambda_datasource_connector.json index 7e7548fa4..f8e9f3f21 100644 --- a/tests/translator/output/aws-cn/graphqlapi_lambda_datasource_connector.json +++ b/tests/translator/output/aws-cn/graphqlapi_lambda_datasource_connector.json @@ -66,7 +66,7 @@ ] }, "LambdaConfig": { - "LambdaFunctionArn": "blah" + "LambdaFunctionArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Name": "MyDataSource", "ServiceRoleArn": { @@ -123,12 +123,12 @@ ], "Effect": "Allow", "Resource": [ - "blah", + "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}:*", { - "DestinationArn": "blah" + "DestinationArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json index 43fb34207..a245392a3 100644 --- a/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/aws-cn/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": 
"APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/aws-cn/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-cn/inline_precedence.json b/tests/translator/output/aws-cn/inline_precedence.json index 51cf5d4a6..e6bdd079b 100644 --- a/tests/translator/output/aws-cn/inline_precedence.json +++ b/tests/translator/output/aws-cn/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/managed_policies_everything.json b/tests/translator/output/aws-cn/managed_policies_everything.json index 1f5a57467..6340b370e 100644 --- a/tests/translator/output/aws-cn/managed_policies_everything.json +++ b/tests/translator/output/aws-cn/managed_policies_everything.json @@ -27,7 +27,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/managed_policies_minimal.json b/tests/translator/output/aws-cn/managed_policies_minimal.json index 79c1a514b..cdaa3d491 100644 --- a/tests/translator/output/aws-cn/managed_policies_minimal.json +++ b/tests/translator/output/aws-cn/managed_policies_minimal.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/schema_validation_4.json b/tests/translator/output/aws-cn/schema_validation_4.json index 27c10b5a6..7bf45a916 100644 --- a/tests/translator/output/aws-cn/schema_validation_4.json +++ b/tests/translator/output/aws-cn/schema_validation_4.json @@ -119,7 +119,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_api.json b/tests/translator/output/aws-cn/state_machine_with_api.json index 0e355ebbe..d10441cb9 100644 --- a/tests/translator/output/aws-cn/state_machine_with_api.json +++ b/tests/translator/output/aws-cn/state_machine_with_api.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_api_single.json b/tests/translator/output/aws-cn/state_machine_with_api_single.json index 706c4eca5..d18c4618a 100644 --- a/tests/translator/output/aws-cn/state_machine_with_api_single.json +++ b/tests/translator/output/aws-cn/state_machine_with_api_single.json @@ -22,7 +22,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": 
"lambda:createdBy", diff --git a/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json b/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json new file mode 100644 index 000000000..60f4052a5 --- /dev/null +++ b/tests/translator/output/aws-cn/state_machine_with_events_and_alias.json @@ -0,0 +1,427 @@ +{ + "Resources": { + "MyStateMachine": { + "Properties": { + "DefinitionString": { + "Fn::Join": [ + "\n", + [ + "{", + " \"StartAt\": \"HelloWorld\",", + " \"States\": {", + " \"HelloWorld\": {", + " \"End\": true,", + " \"Result\": 1,", + " \"Type\": \"Pass\"", + " }", + " }", + "}" + ] + ] + }, + "RoleArn": { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + }, + "StateMachineType": "STANDARD", + "Tags": [ + { + "Key": "stateMachine:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::StepFunctions::StateMachine" + }, + "MyStateMachineAliastest": { + "Properties": { + "DeploymentPreference": { + "StateMachineVersionArn": { + "Ref": "MyStateMachineVersion" + }, + "Type": "ALL_AT_ONCE" + }, + "Name": "test" + }, + "Type": "AWS::StepFunctions::StateMachineAlias" + }, + "MyStateMachineApiEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "apigateway.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWEvent": { + "Properties": { + "EventPattern": { + "detail": { + "state": [ + "terminated" + ] + } + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + 
"PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + "MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + "Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + }, + "EndpointConfiguration": { + "Types": [ + "REGIONAL" + ] + }, + "Parameters": { + "endpointConfigurationTypes": "REGIONAL" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": 
"ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} diff --git a/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json b/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json index afe1e2689..f759b27eb 100644 --- a/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/aws-cn/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/all_policy_templates.json b/tests/translator/output/aws-us-gov/all_policy_templates.json index 7f96aa0b0..3dc4ef5c5 100644 --- a/tests/translator/output/aws-us-gov/all_policy_templates.json +++ b/tests/translator/output/aws-us-gov/all_policy_templates.json @@ -1580,7 +1580,7 @@ "Fn::Sub": [ "${certificateArn}", { - "certificateArn": "arn" + "certificateArn": "arn:aws:acm:us-west-2:987654321098:certificate/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json b/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json index 6d778afbf..bc27df20b 100644 --- a/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json +++ b/tests/translator/output/aws-us-gov/api_http_with_default_iam_authorizer.json @@ -16,7 +16,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json b/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json index 8ec856af8..1ee01d89e 100644 --- a/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json +++ b/tests/translator/output/aws-us-gov/api_merge_definitions_with_any_method.json @@ -54,7 +54,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json b/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json index 9da42fe51..1a4a14964 100644 --- a/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json +++ b/tests/translator/output/aws-us-gov/api_with_merge_definitions_null_paths.json @@ -55,7 +55,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json index 2d688bb9b..3958494af 100644 --- a/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/aws-us-gov/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/connector_with_non_id_source_and_destination.json 
b/tests/translator/output/aws-us-gov/connector_with_non_id_source_and_destination.json index 26dadfc85..c730b0470 100644 --- a/tests/translator/output/aws-us-gov/connector_with_non_id_source_and_destination.json +++ b/tests/translator/output/aws-us-gov/connector_with_non_id_source_and_destination.json @@ -183,7 +183,9 @@ ] }, "ManagedPolicyArns": [ - "arn:{AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + } ] }, "Type": "AWS::IAM::Role" diff --git a/tests/translator/output/aws-us-gov/embedded_connectors_hardcoded_props.json b/tests/translator/output/aws-us-gov/embedded_connectors_hardcoded_props.json index d767d7095..aef58c6c9 100644 --- a/tests/translator/output/aws-us-gov/embedded_connectors_hardcoded_props.json +++ b/tests/translator/output/aws-us-gov/embedded_connectors_hardcoded_props.json @@ -1,6 +1,9 @@ { "Resources": { "MyApiV1": { + "Properties": { + "Name": "MyApiV1" + }, "Type": "AWS::ApiGateway::RestApi" }, "MyApiV1ApiV1ToLambdaWriteLambdaPermission": { @@ -40,6 +43,9 @@ "Type": "AWS::Lambda::Permission" }, "MyApiV2": { + "Properties": { + "Name": "MyApiV2" + }, "Type": "AWS::ApiGatewayV2::Api" }, "MyApiV2ApiV2ToLambdaWriteLambdaPermission": { @@ -79,6 +85,14 @@ "Type": "AWS::Lambda::Permission" }, "MyFunction": { + "Properties": { + "Code": { + "ZipFile": "exports.handler = function(event, context, callback) {\n return callback(null, 'success');\n}\n" + }, + "Handler": "index.handler", + "Role": "arn:aws:iam::111122223333:role/lambda-role", + "Runtime": "nodejs20.x" + }, "Type": "AWS::Lambda::Function" }, "MyQueue": { @@ -88,6 +102,9 @@ "Type": "AWS::SQS::Queue" }, "MyRule": { + "Properties": { + "ScheduleExpression": "rate(5 minutes)" + }, "Type": "AWS::Events::Rule" }, "MyRuleRuleToTopicTopicPolicy": { diff --git a/tests/translator/output/aws-us-gov/function_with_alias_and_all_properties_property.json b/tests/translator/output/aws-us-gov/function_with_alias_and_all_properties_property.json index fed1deb8d..4f3db3c19 100644 --- a/tests/translator/output/aws-us-gov/function_with_alias_and_all_properties_property.json +++ b/tests/translator/output/aws-us-gov/function_with_alias_and_all_properties_property.json @@ -46,7 +46,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ @@ -74,7 +74,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "HashChangeFunctionVersion17ca83d2bf", + "HashChangeFunctionVersiona1a9d4d1e4", "Version" ] }, @@ -82,7 +82,7 @@ }, "Type": "AWS::Lambda::Alias" }, - "HashChangeFunctionVersion17ca83d2bf": { + "HashChangeFunctionVersiona1a9d4d1e4": { "DeletionPolicy": "Retain", "Properties": { "Description": "sam-testing", @@ -127,7 +127,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ diff --git a/tests/translator/output/aws-us-gov/function_with_cw_event.json b/tests/translator/output/aws-us-gov/function_with_cw_event.json index 0cccfd6fa..d82391fad 100644 --- a/tests/translator/output/aws-us-gov/function_with_cw_event.json +++ b/tests/translator/output/aws-us-gov/function_with_cw_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": 
"lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_ignore_globals.json b/tests/translator/output/aws-us-gov/function_with_ignore_globals.json index ad0011e53..1468e72af 100644 --- a/tests/translator/output/aws-us-gov/function_with_ignore_globals.json +++ b/tests/translator/output/aws-us-gov/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_null_events.json b/tests/translator/output/aws-us-gov/function_with_null_events.json index 18620761f..5f0572afc 100644 --- a/tests/translator/output/aws-us-gov/function_with_null_events.json +++ b/tests/translator/output/aws-us-gov/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/function_with_runtime_config.json b/tests/translator/output/aws-us-gov/function_with_runtime_config.json index bd585a2af..91bd9cb7f 100644 --- a/tests/translator/output/aws-us-gov/function_with_runtime_config.json +++ b/tests/translator/output/aws-us-gov/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/aws-us-gov/function_with_sns_event_source_all_parameters.json b/tests/translator/output/aws-us-gov/function_with_sns_event_source_all_parameters.json index 4123e182a..3012b2a7f 100644 --- a/tests/translator/output/aws-us-gov/function_with_sns_event_source_all_parameters.json +++ b/tests/translator/output/aws-us-gov/function_with_sns_event_source_all_parameters.json @@ -62,7 +62,7 @@ "FilterPolicyScope": "MessageAttributes", "Protocol": "lambda", "Region": "region", - "TopicArn": "topicArn-letsAddMoreSymbols" + "TopicArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::SNS::Subscription" }, @@ -73,7 +73,7 @@ "Ref": "MyAwesomeFunction" }, "Principal": "sns.amazonaws.com", - "SourceArn": "topicArn-letsAddMoreSymbols" + "SourceArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::Lambda::Permission" }, diff --git a/tests/translator/output/aws-us-gov/function_with_sourcekmskeyarn.json b/tests/translator/output/aws-us-gov/function_with_sourcekmskeyarn.json new file mode 100644 index 000000000..139dca0e6 --- /dev/null +++ b/tests/translator/output/aws-us-gov/function_with_sourcekmskeyarn.json @@ -0,0 +1,120 
@@ +{ + "Parameters": { + "SourceKMSKeyArnParam": { + "Default": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f", + "Type": "String" + } + }, + "Resources": { + "SourceKMSKeyArnFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "SourceKMSKeyArnFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-us-gov:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SourceKMSKeyArnParameterFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": { + "Ref": "SourceKMSKeyArnParam" + } + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnParameterFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "SourceKMSKeyArnParameterFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws-us-gov:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + } + } +} diff --git a/tests/translator/output/aws-us-gov/globals_for_function.json b/tests/translator/output/aws-us-gov/globals_for_function.json index 7cb1d98a8..fceebdcdd 100644 --- a/tests/translator/output/aws-us-gov/globals_for_function.json +++ b/tests/translator/output/aws-us-gov/globals_for_function.json @@ -7,7 +7,8 @@ ], "Code": { "S3Bucket": "sam-demo-bucket", - "S3Key": "hello.zip" + "S3Key": "hello.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Environment": { "Variables": { @@ -84,7 +85,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "FunctionWithOverridesVersion096ed3b52b", + "FunctionWithOverridesVersionb52716e99f", "Version" ] }, @@ -133,7 +134,7 @@ }, "Type": "AWS::IAM::Role" }, - "FunctionWithOverridesVersion096ed3b52b": { + "FunctionWithOverridesVersionb52716e99f": { "DeletionPolicy": "Retain", "Properties": { "FunctionName": { @@ -149,7 +150,8 @@ ], "Code": { "S3Bucket": "global-bucket", - "S3Key": "global.zip" + "S3Key": "global.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Environment": { "Variables": { @@ -217,7 +219,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "MinimalFunctionVersione7c6f56e4d", + "MinimalFunctionVersion5244f38b49", "Version" ] }, @@ -262,7 +264,7 @@ }, "Type": "AWS::IAM::Role" }, - "MinimalFunctionVersione7c6f56e4d": { + "MinimalFunctionVersion5244f38b49": { "DeletionPolicy": 
"Retain", "Properties": { "FunctionName": { diff --git a/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json b/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json index e32b49b83..391715a04 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_cognito_default_auth.json @@ -30,7 +30,7 @@ "UserPoolConfig": { "AppIdClientRegex": "myregex", "AwsRegion": "na-east-1", - "DefaultAction": "something", + "DefaultAction": "ALLOW", "UserPoolId": "myid" }, "XrayEnabled": true diff --git a/tests/translator/output/aws-us-gov/graphqlapi_ddb_datasource_connector.json b/tests/translator/output/aws-us-gov/graphqlapi_ddb_datasource_connector.json index 6f452be90..d7ad26baf 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_ddb_datasource_connector.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_ddb_datasource_connector.json @@ -104,12 +104,12 @@ ], "Effect": "Allow", "Resource": [ - "table-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "table-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -127,12 +127,12 @@ ], "Effect": "Allow", "Resource": [ - "table-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "table-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -248,12 +248,12 @@ ], "Effect": "Allow", "Resource": [ - "big-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "big-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } @@ -271,12 +271,12 @@ ], "Effect": "Allow", "Resource": [ - "big-arn", + "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}/index/*", { - "DestinationArn": "big-arn" + "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json b/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/graphqlapi_lambda_datasource_connector.json b/tests/translator/output/aws-us-gov/graphqlapi_lambda_datasource_connector.json index 7e7548fa4..f8e9f3f21 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_lambda_datasource_connector.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_lambda_datasource_connector.json @@ -66,7 +66,7 @@ ] }, "LambdaConfig": { - "LambdaFunctionArn": "blah" + "LambdaFunctionArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Name": "MyDataSource", "ServiceRoleArn": { @@ -123,12 +123,12 @@ ], "Effect": "Allow", "Resource": [ - "blah", + 
"arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}:*", { - "DestinationArn": "blah" + "DestinationArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json index 43fb34207..a245392a3 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/aws-us-gov/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/aws-us-gov/inline_precedence.json b/tests/translator/output/aws-us-gov/inline_precedence.json index 8e94957b4..233946f39 100644 --- a/tests/translator/output/aws-us-gov/inline_precedence.json +++ b/tests/translator/output/aws-us-gov/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/managed_policies_everything.json b/tests/translator/output/aws-us-gov/managed_policies_everything.json index 8d726b36e..457de474e 100644 --- a/tests/translator/output/aws-us-gov/managed_policies_everything.json +++ b/tests/translator/output/aws-us-gov/managed_policies_everything.json @@ -27,7 +27,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/managed_policies_minimal.json b/tests/translator/output/aws-us-gov/managed_policies_minimal.json index 2086a5129..4d6712762 100644 --- a/tests/translator/output/aws-us-gov/managed_policies_minimal.json +++ b/tests/translator/output/aws-us-gov/managed_policies_minimal.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/schema_validation_4.json b/tests/translator/output/aws-us-gov/schema_validation_4.json index 23a41fffc..96c4ab21a 100644 --- a/tests/translator/output/aws-us-gov/schema_validation_4.json +++ b/tests/translator/output/aws-us-gov/schema_validation_4.json @@ -119,7 +119,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", 
diff --git a/tests/translator/output/aws-us-gov/state_machine_with_api.json b/tests/translator/output/aws-us-gov/state_machine_with_api.json index 1293ceeed..206df14e6 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_api.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_api.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/state_machine_with_api_single.json b/tests/translator/output/aws-us-gov/state_machine_with_api_single.json index 5b0d5b3d9..2648c24ff 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_api_single.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_api_single.json @@ -22,7 +22,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json b/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json new file mode 100644 index 000000000..60f4052a5 --- /dev/null +++ b/tests/translator/output/aws-us-gov/state_machine_with_events_and_alias.json @@ -0,0 +1,427 @@ +{ + "Resources": { + "MyStateMachine": { + "Properties": { + "DefinitionString": { + "Fn::Join": [ + "\n", + [ + "{", + " \"StartAt\": \"HelloWorld\",", + " \"States\": {", + " \"HelloWorld\": {", + " \"End\": true,", + " \"Result\": 1,", + " \"Type\": \"Pass\"", + " }", + " }", + "}" + ] + ] + }, + "RoleArn": { + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist" + }, + "StateMachineType": "STANDARD", + "Tags": [ + { + "Key": "stateMachine:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::StepFunctions::StateMachine" + }, + "MyStateMachineAliastest": { + "Properties": { + "DeploymentPreference": { + "StateMachineVersionArn": { + "Ref": "MyStateMachineVersion" + }, + "Type": "ALL_AT_ONCE" + }, + "Name": "test" + }, + "Type": "AWS::StepFunctions::StateMachineAlias" + }, + "MyStateMachineApiEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "apigateway.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWEvent": { + "Properties": { + "EventPattern": { + "detail": { + "state": [ + "terminated" + ] + } + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + 
"MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + "MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + 
"Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + }, + "EndpointConfiguration": { + "Types": [ + "REGIONAL" + ] + }, + "Parameters": { + "endpointConfigurationTypes": "REGIONAL" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} diff --git a/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json b/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json index e3e9297b1..c63baf7c2 100644 --- a/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/aws-us-gov/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json b/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json index 7636c5ffe..e3a0f8511 100644 --- a/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json +++ b/tests/translator/output/cognito_user_pool_with_new_property_and_cognito_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/connector_with_non_id_source_and_destination.json b/tests/translator/output/connector_with_non_id_source_and_destination.json index c2d8294ef..7fcdef089 100644 --- a/tests/translator/output/connector_with_non_id_source_and_destination.json +++ b/tests/translator/output/connector_with_non_id_source_and_destination.json @@ -183,7 +183,9 @@ ] }, "ManagedPolicyArns": [ - "arn:{AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + { + "Fn::Sub": "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + } ] }, "Type": "AWS::IAM::Role" diff --git a/tests/translator/output/embedded_connectors_hardcoded_props.json b/tests/translator/output/embedded_connectors_hardcoded_props.json index d767d7095..aef58c6c9 100644 --- a/tests/translator/output/embedded_connectors_hardcoded_props.json +++ b/tests/translator/output/embedded_connectors_hardcoded_props.json @@ -1,6 +1,9 @@ { "Resources": { "MyApiV1": { + "Properties": { + "Name": "MyApiV1" + }, "Type": "AWS::ApiGateway::RestApi" }, "MyApiV1ApiV1ToLambdaWriteLambdaPermission": { @@ -40,6 +43,9 @@ "Type": "AWS::Lambda::Permission" }, "MyApiV2": { + "Properties": { + "Name": "MyApiV2" + }, "Type": "AWS::ApiGatewayV2::Api" }, "MyApiV2ApiV2ToLambdaWriteLambdaPermission": 
{ @@ -79,6 +85,14 @@ "Type": "AWS::Lambda::Permission" }, "MyFunction": { + "Properties": { + "Code": { + "ZipFile": "exports.handler = function(event, context, callback) {\n return callback(null, 'success');\n}\n" + }, + "Handler": "index.handler", + "Role": "arn:aws:iam::111122223333:role/lambda-role", + "Runtime": "nodejs20.x" + }, "Type": "AWS::Lambda::Function" }, "MyQueue": { @@ -88,6 +102,9 @@ "Type": "AWS::SQS::Queue" }, "MyRule": { + "Properties": { + "ScheduleExpression": "rate(5 minutes)" + }, "Type": "AWS::Events::Rule" }, "MyRuleRuleToTopicTopicPolicy": { diff --git a/tests/translator/output/error_invalid_cors_dict.json b/tests/translator/output/error_invalid_cors_dict.json index 5a5da8656..848328401 100644 --- a/tests/translator/output/error_invalid_cors_dict.json +++ b/tests/translator/output/error_invalid_cors_dict.json @@ -3,12 +3,12 @@ "Invalid Serverless Application Specification document. ", "Number of errors found: 1. ", "Resource with id [ServerlessRestApi] is invalid. ", - "Invalid value for 'Cors' property" + "Invalid key 'Foo' for 'Cors' property." ], - "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. Resource with id [ServerlessRestApi] is invalid. Invalid value for 'Cors' property", + "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. Resource with id [ServerlessRestApi] is invalid. Invalid key 'Foo' for 'Cors' property.", "errors": [ { - "errorMessage": "Resource with id [ServerlessRestApi] is invalid. Invalid value for 'Cors' property" + "errorMessage": "Resource with id [ServerlessRestApi] is invalid. Invalid key 'Foo' for 'Cors' property." } ] } diff --git a/tests/translator/output/error_invalid_httpapi_cors_property.json b/tests/translator/output/error_invalid_httpapi_cors_property.json new file mode 100644 index 000000000..afa1c220e --- /dev/null +++ b/tests/translator/output/error_invalid_httpapi_cors_property.json @@ -0,0 +1,14 @@ +{ + "_autoGeneratedBreakdownErrorMessage": [ + "Invalid Serverless Application Specification document. ", + "Number of errors found: 1. ", + "Resource with id [HttpApi] is invalid. ", + "Invalid key 'AllowOrigin' for 'Cors' property." + ], + "errorMessage": "Invalid Serverless Application Specification document. Number of errors found: 1. Resource with id [HttpApi] is invalid. Invalid key 'AllowOrigin' for 'Cors' property.", + "errors": [ + { + "errorMessage": "Resource with id [HttpApi] is invalid. Invalid key 'AllowOrigin' for 'Cors' property." 
+ } + ] +} diff --git a/tests/translator/output/function_with_alias_and_all_properties_property.json b/tests/translator/output/function_with_alias_and_all_properties_property.json index 10120e07e..a932c89cd 100644 --- a/tests/translator/output/function_with_alias_and_all_properties_property.json +++ b/tests/translator/output/function_with_alias_and_all_properties_property.json @@ -46,7 +46,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ @@ -74,7 +74,7 @@ }, "FunctionVersion": { "Fn::GetAtt": [ - "HashChangeFunctionVersion17ca83d2bf", + "HashChangeFunctionVersiona1a9d4d1e4", "Version" ] }, @@ -82,7 +82,7 @@ }, "Type": "AWS::Lambda::Alias" }, - "HashChangeFunctionVersion17ca83d2bf": { + "HashChangeFunctionVersiona1a9d4d1e4": { "DeletionPolicy": "Retain", "Properties": { "Description": "sam-testing", @@ -127,7 +127,7 @@ "PackageType": "Zip", "ReservedConcurrentExecutions": 100, "Role": { - "Fn::Sub": "arn:${AWS::Partition}:iam::role" + "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/role1" }, "Runtime": "python2.7", "Tags": [ diff --git a/tests/translator/output/function_with_cw_event.json b/tests/translator/output/function_with_cw_event.json index 745a57e6b..cd94b0d9d 100644 --- a/tests/translator/output/function_with_cw_event.json +++ b/tests/translator/output/function_with_cw_event.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_ignore_globals.json b/tests/translator/output/function_with_ignore_globals.json index 8c3add664..edb7d7cd2 100644 --- a/tests/translator/output/function_with_ignore_globals.json +++ b/tests/translator/output/function_with_ignore_globals.json @@ -125,7 +125,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_null_events.json b/tests/translator/output/function_with_null_events.json index 7cc118ad2..b1bbcdad5 100644 --- a/tests/translator/output/function_with_null_events.json +++ b/tests/translator/output/function_with_null_events.json @@ -13,7 +13,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/function_with_runtime_config.json b/tests/translator/output/function_with_runtime_config.json index 1a523f7fb..60f79deb0 100644 --- a/tests/translator/output/function_with_runtime_config.json +++ b/tests/translator/output/function_with_runtime_config.json @@ -21,7 +21,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { "Ref": "RuntimeVersionParam" @@ -82,7 +82,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": { "Ref": "RuntimeUpdateParam" @@ -140,7 +140,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -166,7 +166,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "UpdateRuntimeOn": "Auto" }, @@ -276,10 +276,10 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "RuntimeManagementConfig": { "RuntimeVersionArn": { - "Fn::Sub": 
"arn:aws:lambda:${AWS::Region}::runtime:python3.8::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" + "Fn::Sub": "arn:aws:lambda:${AWS::Region}::runtime:python3.11::0af1966588ced06e3143ae720245c9b7aeaae213c6921c12c742a166679cc505" }, "UpdateRuntimeOn": "Manual" }, diff --git a/tests/translator/output/function_with_sns_event_source_all_parameters.json b/tests/translator/output/function_with_sns_event_source_all_parameters.json index ff8c54989..52daff818 100644 --- a/tests/translator/output/function_with_sns_event_source_all_parameters.json +++ b/tests/translator/output/function_with_sns_event_source_all_parameters.json @@ -62,7 +62,7 @@ "FilterPolicyScope": "MessageAttributes", "Protocol": "lambda", "Region": "region", - "TopicArn": "topicArn-letsAddMoreSymbols" + "TopicArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::SNS::Subscription" }, @@ -73,7 +73,7 @@ "Ref": "MyAwesomeFunction" }, "Principal": "sns.amazonaws.com", - "SourceArn": "topicArn-letsAddMoreSymbols" + "SourceArn": "arn:aws:sns:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Type": "AWS::Lambda::Permission" }, diff --git a/tests/translator/output/function_with_sourcekmskeyarn.json b/tests/translator/output/function_with_sourcekmskeyarn.json new file mode 100644 index 000000000..cfe9d0f04 --- /dev/null +++ b/tests/translator/output/function_with_sourcekmskeyarn.json @@ -0,0 +1,120 @@ +{ + "Parameters": { + "SourceKMSKeyArnParam": { + "Default": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f", + "Type": "String" + } + }, + "Resources": { + "SourceKMSKeyArnFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f" + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "SourceKMSKeyArnFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "SourceKMSKeyArnParameterFunction": { + "Properties": { + "Code": { + "S3Bucket": "sam-demo-bucket", + "S3Key": "hello.zip", + "SourceKMSKeyArn": { + "Ref": "SourceKMSKeyArnParam" + } + }, + "Handler": "hello.handler", + "Role": { + "Fn::GetAtt": [ + "SourceKMSKeyArnParameterFunctionRole", + "Arn" + ] + }, + "Runtime": "python3.9", + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::Lambda::Function" + }, + "SourceKMSKeyArnParameterFunctionRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "lambda.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "ManagedPolicyArns": [ + "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" + ], + "Tags": [ + { + "Key": "lambda:createdBy", + "Value": "SAM" + } + ] + }, + "Type": "AWS::IAM::Role" + } + } +} diff --git 
diff --git a/tests/translator/output/globals_for_function.json b/tests/translator/output/globals_for_function.json
index 375a948ca..d3749cd9a 100644
--- a/tests/translator/output/globals_for_function.json
+++ b/tests/translator/output/globals_for_function.json
@@ -7,7 +7,8 @@
         ],
         "Code": {
           "S3Bucket": "sam-demo-bucket",
-          "S3Key": "hello.zip"
+          "S3Key": "hello.zip",
+          "SourceKMSKeyArn": "arn:aws:kms:us-west-2:987654321098:key/dec86919-7219-4e8d-8871-7f1609df2c7f"
         },
         "Environment": {
           "Variables": {
@@ -84,7 +85,7 @@
         },
         "FunctionVersion": {
           "Fn::GetAtt": [
-            "FunctionWithOverridesVersion096ed3b52b",
+            "FunctionWithOverridesVersionb52716e99f",
             "Version"
           ]
         },
@@ -133,7 +134,7 @@
       },
       "Type": "AWS::IAM::Role"
     },
-    "FunctionWithOverridesVersion096ed3b52b": {
+    "FunctionWithOverridesVersionb52716e99f": {
       "DeletionPolicy": "Retain",
       "Properties": {
         "FunctionName": {
@@ -149,7 +150,8 @@
         ],
         "Code": {
           "S3Bucket": "global-bucket",
-          "S3Key": "global.zip"
+          "S3Key": "global.zip",
+          "SourceKMSKeyArn": "arn:aws:kms:us-west-2:123456789012:key/dec86919-7219-4e8d-8871-7f1609df2c7f"
         },
         "Environment": {
           "Variables": {
@@ -217,7 +219,7 @@
         },
         "FunctionVersion": {
           "Fn::GetAtt": [
-            "MinimalFunctionVersione7c6f56e4d",
+            "MinimalFunctionVersion5244f38b49",
             "Version"
           ]
         },
@@ -262,7 +264,7 @@
       },
       "Type": "AWS::IAM::Role"
     },
-    "MinimalFunctionVersione7c6f56e4d": {
+    "MinimalFunctionVersion5244f38b49": {
       "DeletionPolicy": "Retain",
       "Properties": {
         "FunctionName": {
diff --git a/tests/translator/output/graphqlapi_cognito_default_auth.json b/tests/translator/output/graphqlapi_cognito_default_auth.json
index e32b49b83..391715a04 100644
--- a/tests/translator/output/graphqlapi_cognito_default_auth.json
+++ b/tests/translator/output/graphqlapi_cognito_default_auth.json
@@ -30,7 +30,7 @@
       "UserPoolConfig": {
         "AppIdClientRegex": "myregex",
         "AwsRegion": "na-east-1",
-        "DefaultAction": "something",
+        "DefaultAction": "ALLOW",
         "UserPoolId": "myid"
       },
       "XrayEnabled": true
diff --git a/tests/translator/output/graphqlapi_ddb_datasource_connector.json b/tests/translator/output/graphqlapi_ddb_datasource_connector.json
index 6f452be90..d7ad26baf 100644
--- a/tests/translator/output/graphqlapi_ddb_datasource_connector.json
+++ b/tests/translator/output/graphqlapi_ddb_datasource_connector.json
@@ -104,12 +104,12 @@
               ],
               "Effect": "Allow",
               "Resource": [
-                "table-arn",
+                "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f",
                 {
                   "Fn::Sub": [
                     "${DestinationArn}/index/*",
                     {
-                      "DestinationArn": "table-arn"
+                      "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f"
                     }
                   ]
                 }
@@ -127,12 +127,12 @@
               ],
               "Effect": "Allow",
               "Resource": [
-                "table-arn",
+                "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f",
                 {
                   "Fn::Sub": [
                     "${DestinationArn}/index/*",
                     {
-                      "DestinationArn": "table-arn"
+                      "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f"
                     }
                   ]
                 }
@@ -248,12 +248,12 @@
               ],
               "Effect": "Allow",
               "Resource": [
-                "big-arn",
+                "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f",
                 {
                   "Fn::Sub": [
                     "${DestinationArn}/index/*",
                     {
-                      "DestinationArn": "big-arn"
+                      "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f"
                     }
                   ]
                 }
@@ -271,12 +271,12 @@
               ],
               "Effect": "Allow",
               "Resource": [
-                "big-arn",
+                "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f",
                 {
                   "Fn::Sub": [
                     "${DestinationArn}/index/*",
                     {
-                      "DestinationArn": "big-arn"
+                      "DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f"
                     }
                   ]
                 }
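The Version logical-id churn in globals_for_function.json (e.g. MinimalFunctionVersione7c6f56e4d -> MinimalFunctionVersion5244f38b49) is expected rather than noise: the suffix is a hash over the function's resolved properties, so merging the new `SourceKMSKeyArn` global into `Code` changes it. A rough sketch of the scheme, modeled on samtranslator's LogicalIdGenerator (sha1 digest, first 10 hex characters); the exact serialization of the hashed data below is an assumption:

```python
# Rough sketch of version logical-id derivation; the json.dumps
# serialization is an assumption, the sha1/10-char scheme follows
# samtranslator's LogicalIdGenerator.
import hashlib
import json


def version_logical_id(function_logical_id: str, resolved_properties: dict) -> str:
    blob = json.dumps(resolved_properties, sort_keys=True).encode("utf-8")
    suffix = hashlib.sha1(blob).hexdigest()[:10]
    return f"{function_logical_id}Version{suffix}"


# Adding SourceKMSKeyArn under Code yields a different suffix:
base = {"Code": {"S3Bucket": "global-bucket", "S3Key": "global.zip"}}
with_kms = {"Code": {**base["Code"], "SourceKMSKeyArn": "arn:aws:kms:..."}}
assert version_logical_id("MinimalFunction", base) != version_logical_id("MinimalFunction", with_kms)
```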
"DestinationArn": "arn:aws:dynamodb:us-west-2:987654321098:table/dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/graphqlapi_function_by_id.json b/tests/translator/output/graphqlapi_function_by_id.json index ea9a7fc1c..678cbf2bf 100644 --- a/tests/translator/output/graphqlapi_function_by_id.json +++ b/tests/translator/output/graphqlapi_function_by_id.json @@ -12,7 +12,7 @@ "DataSourceName": "some-cool-datasource", "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/graphqlapi_lambda_datasource_connector.json b/tests/translator/output/graphqlapi_lambda_datasource_connector.json index 7e7548fa4..f8e9f3f21 100644 --- a/tests/translator/output/graphqlapi_lambda_datasource_connector.json +++ b/tests/translator/output/graphqlapi_lambda_datasource_connector.json @@ -66,7 +66,7 @@ ] }, "LambdaConfig": { - "LambdaFunctionArn": "blah" + "LambdaFunctionArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" }, "Name": "MyDataSource", "ServiceRoleArn": { @@ -123,12 +123,12 @@ ], "Effect": "Allow", "Resource": [ - "blah", + "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f", { "Fn::Sub": [ "${DestinationArn}:*", { - "DestinationArn": "blah" + "DestinationArn": "arn:aws:lambda:us-west-2:987654321098:function:dec86919-7219-4e8d-8871-7f1609df2c7f" } ] } diff --git a/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json b/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json index 43fb34207..a245392a3 100644 --- a/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json +++ b/tests/translator/output/graphqlapi_multiple_none_datasource_functions.json @@ -39,7 +39,7 @@ }, "Name": "AnotherFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -88,7 +88,7 @@ }, "Name": "GoodFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -111,7 +111,7 @@ }, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, @@ -159,7 +159,7 @@ }, "Name": "SimilarFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json b/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json index dfce18342..c6021bf37 100644 --- a/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json +++ b/tests/translator/output/graphqlapi_resolver_function_with_lambda_datasource.json @@ -173,7 +173,7 @@ "MaxBatchSize": 10, "Name": "MyFunction", "Runtime": { - "Name": "some-runtime", + "Name": "APPSYNC_JS", "RuntimeVersion": "1.2.3" } }, diff --git a/tests/translator/output/inline_precedence.json b/tests/translator/output/inline_precedence.json index 423d7d291..7fd836c8f 100644 --- a/tests/translator/output/inline_precedence.json +++ b/tests/translator/output/inline_precedence.json @@ -12,7 +12,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/output/managed_policies_everything.json b/tests/translator/output/managed_policies_everything.json index a5d744384..7eba902ca 100644 --- a/tests/translator/output/managed_policies_everything.json +++ 
diff --git a/tests/translator/output/managed_policies_everything.json b/tests/translator/output/managed_policies_everything.json
index a5d744384..7eba902ca 100644
--- a/tests/translator/output/managed_policies_everything.json
+++ b/tests/translator/output/managed_policies_everything.json
@@ -27,7 +27,7 @@
           "Arn"
         ]
       },
-      "Runtime": "python3.8",
+      "Runtime": "python3.11",
       "Tags": [
         {
           "Key": "lambda:createdBy",
diff --git a/tests/translator/output/managed_policies_minimal.json b/tests/translator/output/managed_policies_minimal.json
index 87a39ca3c..8c99ddad2 100644
--- a/tests/translator/output/managed_policies_minimal.json
+++ b/tests/translator/output/managed_policies_minimal.json
@@ -12,7 +12,7 @@
           "Arn"
         ]
       },
-      "Runtime": "python3.8",
+      "Runtime": "python3.11",
       "Tags": [
         {
           "Key": "lambda:createdBy",
diff --git a/tests/translator/output/schema_validation_4.json b/tests/translator/output/schema_validation_4.json
index 546e207e3..3c061e2d3 100644
--- a/tests/translator/output/schema_validation_4.json
+++ b/tests/translator/output/schema_validation_4.json
@@ -119,7 +119,7 @@
           "Arn"
         ]
       },
-      "Runtime": "python3.8",
+      "Runtime": "python3.11",
       "Tags": [
         {
           "Key": "lambda:createdBy",
diff --git a/tests/translator/output/state_machine_with_api.json b/tests/translator/output/state_machine_with_api.json
index fd0c27c20..578b67e59 100644
--- a/tests/translator/output/state_machine_with_api.json
+++ b/tests/translator/output/state_machine_with_api.json
@@ -12,7 +12,7 @@
           "Arn"
         ]
       },
-      "Runtime": "python3.8",
+      "Runtime": "python3.11",
       "Tags": [
         {
           "Key": "lambda:createdBy",
diff --git a/tests/translator/output/state_machine_with_api_single.json b/tests/translator/output/state_machine_with_api_single.json
index aca9c79ba..1f5ef12c7 100644
--- a/tests/translator/output/state_machine_with_api_single.json
+++ b/tests/translator/output/state_machine_with_api_single.json
@@ -22,7 +22,7 @@
           "Arn"
         ]
       },
-      "Runtime": "python3.8",
+      "Runtime": "python3.11",
       "Tags": [
         {
           "Key": "lambda:createdBy",
diff --git a/tests/translator/output/state_machine_with_events_and_alias.json b/tests/translator/output/state_machine_with_events_and_alias.json
new file mode 100644
index 000000000..1f4567f89
--- /dev/null
+++ b/tests/translator/output/state_machine_with_events_and_alias.json
@@ -0,0 +1,419 @@
+{
+  "Resources": {
+    "MyStateMachine": {
+      "Properties": {
+        "DefinitionString": {
+          "Fn::Join": [
+            "\n",
+            [
+              "{",
+              "    \"StartAt\": \"HelloWorld\",",
+              "    \"States\": {",
+              "        \"HelloWorld\": {",
+              "            \"End\": true,",
+              "            \"Result\": 1,",
+              "            \"Type\": \"Pass\"",
+              "        }",
+              "    }",
+              "}"
+            ]
+          ]
+        },
+        "RoleArn": {
+          "Fn::Sub": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/doesNotExist"
+        },
+        "StateMachineType": "STANDARD",
+        "Tags": [
+          {
+            "Key": "stateMachine:createdBy",
+            "Value": "SAM"
+          }
+        ]
+      },
+      "Type": "AWS::StepFunctions::StateMachine"
+    },
+    "MyStateMachineAliastest": {
+      "Properties": {
+        "DeploymentPreference": {
+          "StateMachineVersionArn": {
+            "Ref": "MyStateMachineVersion"
+          },
+          "Type": "ALL_AT_ONCE"
+        },
+        "Name": "test"
+      },
+      "Type": "AWS::StepFunctions::StateMachineAlias"
+    },
+    "MyStateMachineApiEventRole": {
+      "Properties": {
+        "AssumeRolePolicyDocument": {
+          "Statement": [
+            {
+              "Action": [
+                "sts:AssumeRole"
+              ],
+              "Effect": "Allow",
+              "Principal": {
+                "Service": [
+                  "apigateway.amazonaws.com"
+                ]
+              }
+            }
+          ],
+          "Version": "2012-10-17"
+        },
+        "Policies": [
+          {
+            "PolicyDocument": {
+              "Statement": [
+                {
+                  "Action": "states:StartExecution",
+                  "Effect": "Allow",
+                  "Resource": {
+                    "Ref": "MyStateMachineAliastest"
+                  }
+                }
+              ]
+            },
+            "PolicyName": "MyStateMachineApiEventRoleStartExecutionPolicy"
+          }
+        ]
+      },
+      "Type": "AWS::IAM::Role"
+    },
+    "MyStateMachineCWEvent": {
+      "Properties": {
+        "EventPattern": {
+          "detail": {
+            "state": [
"terminated" + ] + } + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineCWSchedule": { + "Properties": { + "Description": "test schedule", + "Name": "TestSchedule", + "ScheduleExpression": "rate(1 minute)", + "State": "DISABLED", + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineCWScheduleStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineCWScheduleRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineCWScheduleRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineCWScheduleRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineEBEvent": { + "Properties": { + "EventPattern": { + "source": [ + "aws.tag" + ] + }, + "Targets": [ + { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "Id": "MyStateMachineEBEventStepFunctionsTarget", + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineEBEventRole", + "Arn" + ] + } + } + ] + }, + "Type": "AWS::Events::Rule" + }, + "MyStateMachineEBEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "events.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + "Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineEBEventRoleStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineScheduleEvent": { + "Properties": { + "FlexibleTimeWindow": { + "Mode": "OFF" + }, + "Name": "MyStateMachineScheduleEvent", + "ScheduleExpression": "rate(1 minute)", + "Target": { + "Arn": { + "Ref": "MyStateMachineAliastest" + }, + "RoleArn": { + "Fn::GetAtt": [ + "MyStateMachineScheduleEventRole", + "Arn" + ] + } + } + }, + "Type": "AWS::Scheduler::Schedule" + }, + "MyStateMachineScheduleEventRole": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "scheduler.amazonaws.com" + ] + } + } + ], + "Version": "2012-10-17" + }, + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": "states:StartExecution", + "Effect": "Allow", + 
"Resource": { + "Ref": "MyStateMachineAliastest" + } + } + ] + }, + "PolicyName": "MyStateMachineScheduleEventStartExecutionPolicy" + } + ] + }, + "Type": "AWS::IAM::Role" + }, + "MyStateMachineVersion": { + "DeletionPolicy": "Retain", + "Properties": { + "StateMachineArn": { + "Ref": "MyStateMachine" + }, + "StateMachineRevisionId": { + "Fn::GetAtt": [ + "MyStateMachine", + "StateMachineRevisionId" + ] + } + }, + "Type": "AWS::StepFunctions::StateMachineVersion", + "UpdateReplacePolicy": "Retain" + }, + "ServerlessRestApi": { + "Properties": { + "Body": { + "info": { + "title": { + "Ref": "AWS::StackName" + }, + "version": "1.0" + }, + "paths": { + "/path": { + "get": { + "responses": { + "200": { + "description": "OK" + }, + "400": { + "description": "Bad Request" + } + }, + "x-amazon-apigateway-integration": { + "credentials": { + "Fn::GetAtt": [ + "MyStateMachineApiEventRole", + "Arn" + ] + }, + "httpMethod": "POST", + "requestTemplates": { + "application/json": { + "Fn::Sub": "{\"input\": \"$util.escapeJavaScript($input.json('$'))\", \"stateMachineArn\": \"${MyStateMachineAliastest}\"}" + } + }, + "responses": { + "200": { + "statusCode": "200" + }, + "400": { + "statusCode": "400" + } + }, + "type": "aws", + "uri": { + "Fn::Sub": "arn:${AWS::Partition}:apigateway:${AWS::Region}:states:action/StartExecution" + } + } + } + } + }, + "swagger": "2.0" + } + }, + "Type": "AWS::ApiGateway::RestApi" + }, + "ServerlessRestApiDeploymente6166edbc7": { + "Properties": { + "Description": "RestApi deployment id: e6166edbc7b05836f53278af31642807c36e76b3", + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Stage" + }, + "Type": "AWS::ApiGateway::Deployment" + }, + "ServerlessRestApiProdStage": { + "Properties": { + "DeploymentId": { + "Ref": "ServerlessRestApiDeploymente6166edbc7" + }, + "RestApiId": { + "Ref": "ServerlessRestApi" + }, + "StageName": "Prod" + }, + "Type": "AWS::ApiGateway::Stage" + } + } +} diff --git a/tests/translator/output/state_machine_with_sam_policy_templates.json b/tests/translator/output/state_machine_with_sam_policy_templates.json index 848257ccd..7108504af 100644 --- a/tests/translator/output/state_machine_with_sam_policy_templates.json +++ b/tests/translator/output/state_machine_with_sam_policy_templates.json @@ -394,7 +394,7 @@ "Arn" ] }, - "Runtime": "python3.8", + "Runtime": "python3.11", "Tags": [ { "Key": "lambda:createdBy", diff --git a/tests/translator/test_translator.py b/tests/translator/test_translator.py index 6090b1093..802782666 100644 --- a/tests/translator/test_translator.py +++ b/tests/translator/test_translator.py @@ -781,7 +781,7 @@ class TestTemplateValidation(TestCase): "MyFunction": { "Type": "AWS::Serverless::Function", "Properties": { - "Runtime": "python3.8", + "Runtime": "python3.11", "Handler": "foo", "InlineCode": "bar", "Policies": [