diff --git a/samtranslator/__init__.py b/samtranslator/__init__.py index 3e3e2b16f..ca2330064 100644 --- a/samtranslator/__init__.py +++ b/samtranslator/__init__.py @@ -1 +1 @@ -__version__ = "1.86.0" +__version__ = "1.87.0" diff --git a/samtranslator/region_configuration.py b/samtranslator/region_configuration.py index 6dfc5121c..c2a210b9c 100644 --- a/samtranslator/region_configuration.py +++ b/samtranslator/region_configuration.py @@ -18,12 +18,7 @@ def is_apigw_edge_configuration_supported(cls) -> bool: :return: True, if API Gateway does not support Edge configuration """ - return ArnGenerator.get_partition_name() not in [ - "aws-us-gov", - "aws-iso", - "aws-iso-b", - "aws-cn", - ] + return ArnGenerator.get_partition_name() not in ["aws-us-gov", "aws-iso", "aws-iso-b", "aws-cn", "aws-iso-e"] @classmethod def is_service_supported(cls, service, region=None): # type: ignore[no-untyped-def] diff --git a/samtranslator/schema/schema.json b/samtranslator/schema/schema.json index 56c2245bb..2432b810c 100644 --- a/samtranslator/schema/schema.json +++ b/samtranslator/schema/schema.json @@ -1216,12 +1216,12 @@ "additionalProperties": false, "properties": { "Data": { - "markdownDescription": "The rules definition file for this namespace.", + "markdownDescription": "The rules file used in the namespace.\n\nFor more details about the rules file, see [Creating a rules file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-ruler-rulesfile.html) in the *Amazon Managed Service for Prometheus User Guide* .", "title": "Data", "type": "string" }, "Name": { - "markdownDescription": "The name of the rule groups namespace. This property is required.", + "markdownDescription": "The name of the rule groups namespace.", "title": "Name", "type": "string" }, @@ -1229,12 +1229,12 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of key and value pairs for the workspace resources.", + "markdownDescription": "The list of tag keys and values that are associated with the rule groups namespace.", "title": "Tags", "type": "array" }, "Workspace": { - "markdownDescription": "The ARN of the workspace that contains this rule groups namespace.", + "markdownDescription": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics.", "title": "Workspace", "type": "string" } @@ -1303,30 +1303,30 @@ "additionalProperties": false, "properties": { "AlertManagerDefinition": { - "markdownDescription": "The alert manager definition for the workspace, as a string. For more information, see [Alert manager and templating](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alert-manager.html) .", + "markdownDescription": "The alert manager definition, a YAML configuration for the alert manager in your Amazon Managed Service for Prometheus workspace.\n\nFor details about the alert manager definition, see [Creating an alert manager configuration file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alertmanager-config.html) in the *Amazon Managed Service for Prometheus User Guide* .\n\nThe following example shows part of a CloudFormation YAML file with an embedded alert manager definition (following the `- |-` ).\n\n`Workspace: Type: AWS::APS::Workspace .... Properties: ....
AlertManagerDefinition: Fn::Sub: - |- alertmanager_config: | templates: - 'default_template' route: receiver: example-sns receivers: - name: example-sns sns_configs: - topic_arn: 'arn:aws:sns:${AWS::Region}:${AWS::AccountId}:${TopicName}' -`", "title": "AlertManagerDefinition", "type": "string" }, "Alias": { - "markdownDescription": "An alias that you assign to this workspace to help you identify it. It does not need to be unique.\n\nThe alias can be as many as 100 characters and can include any type of characters. Amazon Managed Service for Prometheus automatically strips any blank spaces from the beginning and end of the alias that you specify.", + "markdownDescription": "The alias that is assigned to this workspace to help identify it. It does not need to be unique.", "title": "Alias", "type": "string" }, "KmsKeyArn": { - "markdownDescription": "", + "markdownDescription": "(optional) The ARN for a customer managed AWS KMS key to use for encrypting data within your workspace. For more information about using your own key in your workspace, see [Encryption at rest](https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) in the *Amazon Managed Service for Prometheus User Guide* .", "title": "KmsKeyArn", "type": "string" }, "LoggingConfiguration": { "$ref": "#/definitions/AWS::APS::Workspace.LoggingConfiguration", - "markdownDescription": "The LoggingConfiguration attribute is used to set the logging configuration for the workspace.", + "markdownDescription": "Contains information about the logging configuration for the workspace.", "title": "LoggingConfiguration" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tag keys and values to associate with the workspace.", + "markdownDescription": "The list of tag keys and values that are associated with the workspace.", "title": "Tags", "type": "array" } @@ -1357,7 +1357,7 @@ "additionalProperties": false, "properties": { "LogGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the CloudWatch log group the logs are emitted to.", + "markdownDescription": "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation.", "title": "LogGroupArn", "type": "string" } @@ -2319,7 +2319,7 @@ "additionalProperties": false, "properties": { "AccessToken": { - "markdownDescription": "The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The personal access token for a GitHub repository for an Amplify app.
The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "title": "AccessToken", "type": "string" }, @@ -2334,12 +2334,12 @@ "title": "BasicAuthConfig" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.\n\n*Pattern:* (?s).+", + "markdownDescription": "The build specification (build spec) for an Amplify app.", "title": "BuildSpec", "type": "string" }, "CustomHeaders": { - "markdownDescription": "The custom HTTP headers for an Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 25000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The custom HTTP headers for an Amplify app.", "title": "CustomHeaders", "type": "string" }, @@ -2352,7 +2352,7 @@ "type": "array" }, "Description": { - "markdownDescription": "The description for an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The description of the Amplify app.", "title": "Description", "type": "string" }, @@ -2370,17 +2370,17 @@ "type": "array" }, "IAMServiceRole": { - "markdownDescription": "The AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) of the Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", "title": "IAMServiceRole", "type": "string" }, "Name": { - "markdownDescription": "The name for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", + "markdownDescription": "The name of the Amplify app.", "title": "Name", "type": "string" }, "OauthToken": { - "markdownDescription": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App.
For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "title": "OauthToken", "type": "string" }, @@ -2390,7 +2390,7 @@ "type": "string" }, "Repository": { - "markdownDescription": "The repository for an Amplify app.\n\n*Pattern:* (?s).*", + "markdownDescription": "The Git repository for the Amplify app.", "title": "Repository", "type": "string" }, @@ -2446,7 +2446,7 @@ "title": "BasicAuthConfig" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for the autocreated branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.", + "markdownDescription": "The build specification (build spec) for the autocreated branch.", "title": "BuildSpec", "type": "string" }, @@ -2484,7 +2484,7 @@ "type": "string" }, "PullRequestEnvironmentName": { - "markdownDescription": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", + "markdownDescription": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment.
Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "title": "PullRequestEnvironmentName", "type": "string" }, @@ -2505,12 +2505,12 @@ "type": "boolean" }, "Password": { - "markdownDescription": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The password for basic authorization.", "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The user name for basic authorization.", "title": "Username", "type": "string" } @@ -2521,22 +2521,22 @@ "additionalProperties": false, "properties": { "Condition": { - "markdownDescription": "The condition for a URL rewrite or redirect rule, such as a country code.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 2048.\n\n*Pattern:* (?s).*", + "markdownDescription": "The condition for a URL rewrite or redirect rule, such as a country code.", "title": "Condition", "type": "string" }, "Source": { - "markdownDescription": "The source pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 2048.\n\n*Pattern:* (?s).+", + "markdownDescription": "The source pattern for a URL rewrite or redirect rule.", "title": "Source", "type": "string" }, "Status": { - "markdownDescription": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.\n\n*Length Constraints:* Minimum length of 3. Maximum length of 7.\n\n*Pattern:* .{3,7}", + "markdownDescription": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved permanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.", "title": "Status", "type": "string" }, "Target": { - "markdownDescription": "The target pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1.
Maximum length of 2048.\n\n*Pattern:* (?s).+", + "markdownDescription": "The target pattern for a URL rewrite or redirect rule.", "title": "Target", "type": "string" } @@ -2551,12 +2551,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*", + "markdownDescription": "", "title": "Value", "type": "string" } @@ -2609,7 +2609,7 @@ }, "Backend": { "$ref": "#/definitions/AWS::Amplify::Branch.Backend", - "markdownDescription": "The backend environment for an Amplify app.", + "markdownDescription": "Specifies the backend for a `Branch` of an Amplify app.", "title": "Backend" }, "BasicAuthConfig": { @@ -2618,17 +2618,17 @@ "title": "BasicAuthConfig" }, "BranchName": { - "markdownDescription": "The name for the branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", + "markdownDescription": "The name for the branch.", "title": "BranchName", "type": "string" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for the branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.\n\n*Pattern:* (?s).+", + "markdownDescription": "The build specification (build spec) for the branch.", "title": "BuildSpec", "type": "string" }, "Description": { - "markdownDescription": "The description for the branch that is part of an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The description for the branch that is part of an Amplify app.", "title": "Description", "type": "string" }, @@ -2661,12 +2661,12 @@ "type": "string" }, "PullRequestEnvironmentName": { - "markdownDescription": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", + "markdownDescription": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment.
Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "title": "PullRequestEnvironmentName", "type": "string" }, "Stage": { - "markdownDescription": "Describes the current stage for the branch.\n\n*Valid Values:* PRODUCTION | BETA | DEVELOPMENT | EXPERIMENTAL | PULL_REQUEST", + "markdownDescription": "Describes the current stage for the branch.", "title": "Stage", "type": "string" }, @@ -2726,12 +2726,12 @@ "type": "boolean" }, "Password": { - "markdownDescription": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The password for basic authorization.", "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "", "title": "Username", "type": "string" } @@ -2746,12 +2746,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2798,7 +2798,7 @@ "additionalProperties": false, "properties": { "AppId": { - "markdownDescription": "The unique ID for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 20.\n\n*Pattern:* d[a-z0-9]+", + "markdownDescription": "The unique ID for an Amplify app.", "title": "AppId", "type": "string" }, @@ -2811,12 +2811,12 @@ "type": "array" }, "AutoSubDomainIAMRole": { - "markdownDescription": "The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* ^$|^arn:aws:iam::\\d{12}:role.+", + "markdownDescription": "The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.", "title": "AutoSubDomainIAMRole", "type": "string" }, "DomainName": { - "markdownDescription": "The domain name for the domain association.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* ^(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])(\\.)?$", + "markdownDescription": "The domain name for the domain association.", "title": "DomainName", "type": "string" }, @@ -2871,7 +2871,7 @@ "type": "string" }, "Prefix": { - "markdownDescription": "The prefix setting for the subdomain.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "The prefix setting for the subdomain.", "title": "Prefix", "type": "string" } @@ -7138,7 +7138,7 @@ "type": "string" }, "IntegrationMethod": { - "markdownDescription": "Specifies the integration's HTTP method type.", + "markdownDescription": "Specifies the integration's HTTP method type.
For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "title": "IntegrationMethod", "type": "string" }, @@ -7762,7 +7762,7 @@ "type": "string" }, "IntegrationMethod": { - "markdownDescription": "Specifies the integration's HTTP method type.", + "markdownDescription": "Specifies the integration's HTTP method type. For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "title": "IntegrationMethod", "type": "string" }, @@ -17598,7 +17598,7 @@ "type": "string" }, "DisconnectTimeoutInSeconds": { - "markdownDescription": "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 360000.", + "markdownDescription": "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 36000.", "title": "DisconnectTimeoutInSeconds", "type": "number" }, @@ -17628,7 +17628,7 @@ "type": "string" }, "IdleDisconnectTimeoutInSeconds": { - "markdownDescription": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", + "markdownDescription": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session.
File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", "title": "IdleDisconnectTimeoutInSeconds", "type": "number" }, @@ -17653,7 +17653,7 @@ "type": "number" }, "MaxSessionsPerInstance": { - "markdownDescription": "The maximum number of user sessions on an instance. This only applies to multi-session fleets.", + "markdownDescription": "Max number of user sessions on an instance. This is applicable only for multi-session fleets.", "title": "MaxSessionsPerInstance", "type": "number" }, @@ -17740,7 +17740,7 @@ "type": "number" }, "DesiredSessions": { - "markdownDescription": "The desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must set either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank.", + "markdownDescription": "The desired capacity in terms of number of user sessions, for the multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must define either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank.", "title": "DesiredSessions", "type": "number" } @@ -18229,7 +18229,7 @@ "type": "string" }, "MaximumLength": { - "markdownDescription": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` . This can't be specified when the permission is `DISABLED` .\n\nThis can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets.\n\nThe value can be between 1 and 20,971,520 (20 MB).", + "markdownDescription": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` .
This can't be specified when the permission is `DISABLED` .\n\nThe value can be between 1 and 20,971,520 (20 MB).", "title": "MaximumLength", "type": "number" }, @@ -22483,7 +22483,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -24753,8 +24753,6 @@ "type": "string" }, "ModifiedAt": { - "markdownDescription": "Returns a timestamp representing the date and time for the most recent change for the transformer object.", - "title": "ModifiedAt", "type": "string" }, "Name": { @@ -25018,7 +25016,7 @@ "type": "string" }, "ScheduleExpressionTimezone": { - "markdownDescription": "", + "markdownDescription": "This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC.
You can modify this to a specified timezone.", "title": "ScheduleExpressionTimezone", "type": "string" }, @@ -25552,7 +25550,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string. The structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` .", + "markdownDescription": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string if you are creating or editing a framework from the console (though the value can be an empty string when included in a CloudFormation template).\n\nThe structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` .", "title": "Tags", "type": "array" } @@ -25814,7 +25812,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: `+ - = . _ : /.`", "title": "Tags", "type": "array" } @@ -26284,12 +26282,12 @@ "items": { "$ref": "#/definitions/AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject" }, - "markdownDescription": "Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . To remove the EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "Provides information used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . To remove the Amazon EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "title": "Ec2Configuration", "type": "array" }, "Ec2KeyPair": { - "markdownDescription": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the EC2 key pair requires an infrastructure update of the compute environment.
For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the Amazon EC2 key pair requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "title": "Ec2KeyPair", "type": "string" }, @@ -26335,7 +26333,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", + "markdownDescription": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the Amazon EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", "title": "SecurityGroupIds", "type": "array" }, @@ -26348,13 +26346,13 @@ "items": { "type": "string" }, - "markdownDescription": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment.
For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", + "markdownDescription": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", "title": "Subnets", "type": "array" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value-for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute environment.
For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value-for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } @@ -26499,12 +26497,12 @@ "properties": { "ContainerProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ContainerProperties", - "markdownDescription": "An object with various properties specific to Amazon ECS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", + "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", - "markdownDescription": "An object with various properties that are specific to Amazon EKS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", + "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", "title": "EksProperties" }, "JobDefinitionName": { @@ -26514,7 +26512,7 @@ }, "NodeProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.NodeProperties", - "markdownDescription": "An object with various properties that are specific to multi-node parallel jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", + "markdownDescription": "An object with properties that are specific to multi-node parallel jobs. When `nodeProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `eksProperties` .\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", "title": "NodeProperties" }, "Parameters": { @@ -26568,7 +26566,7 @@ "title": "Timeout" }, "Type": { - "markdownDescription": "The type of job definition. For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n> If the job is run on Fargate resources, then `multinode` isn't supported.", + "markdownDescription": "The type of job definition.
For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n- If the value is `container` , then one of the following is required: `containerProperties` , `ecsProperties` , or `eksProperties` .\n- If the value is `multinode` , then `nodeProperties` is required.\n\n> If the job is run on Fargate resources, then `multinode` isn't supported.", "title": "Type", "type": "string" } @@ -26630,7 +26628,7 @@ }, "FargatePlatformConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.FargatePlatformConfiguration", - "markdownDescription": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "markdownDescription": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "title": "FargatePlatformConfiguration" }, "Image": { @@ -26655,11 +26653,11 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options.
For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html) data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { - "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.", + "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places.
It must be specified for each node at least once.", "title": "Memory", "type": "number" }, @@ -26673,7 +26671,7 @@ }, "NetworkConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", - "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "title": "NetworkConfiguration" }, "Privileged": { @@ -26721,7 +26719,7 @@ "type": "string" }, "Vcpus": { - "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", + "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on Amazon EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", "title": "Vcpus", "type": "number" }, @@ -26806,7 +26804,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists.
For more information, see [CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) in the *Dockerfile reference* and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", + "markdownDescription": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", "title": "Args", "type": "array" }, @@ -27341,7 +27339,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . \n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. 
The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880", + "markdownDescription": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on Amazon EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once.\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . 
\n\nFor jobs that are running on Fargate resources, `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5 or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For Amazon EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16.\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880", "title": "Value", "type": "string" } @@ -33857,7 +33855,7 @@ "type": "boolean" }, "HttpVersion": { - "markdownDescription": "(Optional) Specify the maximum HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). 
CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", + "markdownDescription": "(Optional) Specify the HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", "title": "HttpVersion", "type": "string" }, @@ -34313,7 +34311,7 @@ "additionalProperties": false, "properties": { "OriginAccessIdentity": { - "markdownDescription": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n\norigin-access-identity/cloudfront/ *ID-of-origin-access-identity*\n\nwhere `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "> If you're using origin access control (OAC) instead of origin access identity, specify an empty `OriginAccessIdentity` element. For more information, see [Restricting access to an AWS origin](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-origin.html) in the *Amazon CloudFront Developer Guide* . \n\nThe CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront. 
The format of the value is:\n\n`origin-access-identity/cloudfront/ID-of-origin-access-identity`\n\nThe `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* .", "title": "OriginAccessIdentity", "type": "string" } @@ -36276,7 +36274,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36599,7 +36597,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this field, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This field is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` field per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
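To make the `resources.ARN` guidance above concrete, here is a minimal sketch (not part of the schema itself) of an `AdvancedEventSelectors` value that logs data events for a single S3 bucket, using `StartsWith` with a bucket-only ARN and the intentional trailing slash; the bucket name is an illustrative placeholder:

```json
{
  "AdvancedEventSelectors": [
    {
      "Name": "Log data events for one S3 bucket",
      "FieldSelectors": [
        { "Field": "eventCategory", "Equals": ["Data"] },
        { "Field": "resources.type", "Equals": ["AWS::S3::Object"] },
        { "Field": "resources.ARN", "StartsWith": ["arn:aws:s3:::amzn-s3-demo-bucket/"] }
      ]
    }
  ]
}
```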
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36765,7 +36763,7 @@ "type": "string" }, "ComparisonOperator": { - "markdownDescription": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.\n\nYou can specify the following values: `GreaterThanThreshold` , `GreaterThanOrEqualToThreshold` , `LessThanThreshold` , or `LessThanOrEqualToThreshold` .", + "markdownDescription": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.", "title": "ComparisonOperator", "type": "string" }, @@ -38715,7 +38713,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- **EVENT** - A webhook event triggers a build when the provided `pattern` matches one of five event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- **ACTOR_ACCOUNT_ID** - A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- **HEAD_REF** - A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- **BASE_REF** - A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- **FILE_PATH** - A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- **COMMIT_MESSAGE** - A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", + "markdownDescription": "The type of webhook filter. 
There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", "title": "Type", "type": "string" } @@ -42298,7 +42296,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -42650,7 +42648,7 @@ "additionalProperties": false, "properties": { "MaxValue": { - "markdownDescription": "The maximum value of an attribute that is of the number data type.", + "markdownDescription": "The maximum value of a number attribute. 
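As a hedged illustration of the webhook filter types documented above, the following sketch shows an `AWS::CodeBuild::Project` `Triggers` value that starts a build only for pushes to a `main` branch; the branch name and single filter group are assumptions for the example, not part of this schema change:

```json
{
  "Triggers": {
    "Webhook": true,
    "FilterGroups": [
      [
        { "Type": "EVENT", "Pattern": "PUSH" },
        { "Type": "HEAD_REF", "Pattern": "^refs/heads/main$" }
      ]
    ]
  }
}
```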
Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "title": "MaxValue", "type": "string" }, @@ -42807,7 +42805,7 @@ "additionalProperties": false, "properties": { "MaxLength": { - "markdownDescription": "The maximum length.", + "markdownDescription": "The maximum length of a string attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "title": "MaxLength", "type": "string" }, @@ -43367,7 +43365,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": 
\"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", + "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. 
They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", "title": "ProviderDetails", "type": "object" }, @@ -45384,7 +45382,7 @@ "additionalProperties": false, "properties": { "DocumentName": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, AWS Config checks only your account and AWS Region for the SSM document. If you want to use an SSM document from another Region or account, you must provide the ARN.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, AWS Config checks only your account and AWS Region for the SSM document.", "title": "DocumentName", "type": "string" }, @@ -49245,7 +49243,7 @@ "additionalProperties": false, "properties": { "AfterContactWorkTimeLimit": { - "markdownDescription": "The After Call Work (ACW) timeout setting, in seconds.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", + "markdownDescription": "The After Call Work (ACW) timeout setting, in seconds. This parameter has a minimum value of 0 and a maximum value of 2,000,000 seconds (24 days). Enter 0 if you don't want to allocate a specific amount of ACW time. It essentially means an indefinite amount of time. When the conversation ends, ACW starts; the agent must choose Close contact to end ACW.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", "title": "AfterContactWorkTimeLimit", "type": "number" }, @@ -49802,7 +49800,7 @@ "additionalProperties": false, "properties": { "ControlIdentifier": { - "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *landing zone Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. 
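To put the provider detail keys above in context, here is a minimal sketch of an `AWS::Cognito::UserPoolIdentityProvider` resource for a Google IdP, reusing the illustrative client values from the description; the user pool ID and attribute mapping are placeholders:

```json
{
  "Type": "AWS::Cognito::UserPoolIdentityProvider",
  "Properties": {
    "UserPoolId": "us-east-1_EXAMPLE",
    "ProviderName": "Google",
    "ProviderType": "Google",
    "ProviderDetails": {
      "client_id": "1example23456789.apps.googleusercontent.com",
      "client_secret": "provider-app-client-secret",
      "authorize_scopes": "email profile openid"
    },
    "AttributeMapping": { "email": "email" }
  }
}
```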
For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", "title": "ControlIdentifier", "type": "string" }, @@ -61437,7 +61435,7 @@ "type": "array" }, "ProjectionType": { - "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.", + "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default.", "title": "ProjectionType", "type": "string" } @@ -62043,7 +62041,7 @@ "type": "array" }, "ProjectionType": { - "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.", + "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default.", "title": "ProjectionType", "type": "string" } @@ -63564,7 +63562,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.", + "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -63723,7 +63721,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. 
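For the `ProjectionType` values documented just above, a minimal sketch of a DynamoDB index `Projection` that carries two extra non-key attributes into the index (the attribute names are illustrative):

```json
{
  "Projection": {
    "ProjectionType": "INCLUDE",
    "NonKeyAttributes": ["OrderStatus", "OrderTotal"]
  }
}
```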
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -66824,7 +66822,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71313,7 +71311,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. 
The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . 
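A minimal sketch of an `InstanceRequirements` block that opts out of Spot price protection by using the high threshold described above; the vCPU and memory bounds are illustrative assumptions:

```json
{
  "InstanceRequirements": {
    "VCpuCount": { "Min": 2, "Max": 8 },
    "MemoryMiB": { "Min": 4096 },
    "SpotMaxPricePercentageOverLowestPrice": 999999
  }
}
```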
\n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71383,7 +71381,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.", + "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -83543,11 +83541,13 @@ }, "Properties": { "additionalProperties": true, + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Properties", "type": "object" } }, @@ -83896,7 +83896,7 @@ "type": "array" }, "PreferredMaintenanceWindow": { - "markdownDescription": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for `ddd` are:\n\nSpecifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", + "markdownDescription": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", "title": "PreferredMaintenanceWindow", "type": "string" }, @@ -84912,7 +84912,7 @@ }, "Endpoint": { "$ref": "#/definitions/AWS::ElastiCache::ServerlessCache.Endpoint", - "markdownDescription": "Represents the information required for client programs to connect to a cache node.", + "markdownDescription": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "title": "Endpoint" }, "Engine": { @@ -84937,7 +84937,7 @@ }, "ReaderEndpoint": { "$ref": "#/definitions/AWS::ElastiCache::ServerlessCache.Endpoint", - "markdownDescription": "Represents the information required for client programs to connect to a cache node.", + "markdownDescription": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "title": "ReaderEndpoint" }, "SecurityGroupIds": { @@ -85212,7 +85212,7 @@ }, "AuthenticationMode": { "$ref": "#/definitions/AWS::ElastiCache::User.AuthenticationMode", - "markdownDescription": "Specifies the authentication mode to use. Below is an example of the possible JSON values:\n\n```\n{ Type: Passwords: [\"*****\", \"******\"] // If Type is password.\n}\n```", + "markdownDescription": "Specifies the authentication mode to use. 
Below is an example of the possible JSON values when `Type` is `password` :\n\n```\n{ \"Type\": \"password\", \"Passwords\": [\"*****\", \"******\"]\n}\n```", "title": "AuthenticationMode" }, "Engine": { @@ -86018,7 +86018,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Availability Zones for the load balancer. For load balancers in a VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", + "markdownDescription": "The Availability Zones for a load balancer in a default VPC. For a load balancer in a nondefault VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", "title": "AvailabilityZones", "type": "array" }, @@ -86441,7 +86441,7 @@ "type": "string" }, "SslPolicy": { - "markdownDescription": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* .", + "markdownDescription": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nUpdating the security policy can result in interruptions if the load balancer is handling a high volume of traffic.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* .", "title": "SslPolicy", "type": "string" } @@ -92115,11 +92115,13 @@ }, "Parameters": { "additionalProperties": true, + "markdownDescription": "The parameters for the action.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Parameters", "type": "object" }, "StartAfter": { @@ -92132,11 +92134,13 @@ }, "Targets": { "additionalProperties": true, + "markdownDescription": "The targets for the action.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Targets", "type": "object" } }, @@ -92166,7 +92170,7 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::FIS::ExperimentTemplate.CloudWatchLogsConfiguration", - "markdownDescription": "The configuration for experiment logging to Amazon CloudWatch Logs.", + "markdownDescription": "The configuration for experiment logging to CloudWatch Logs .", "title": "CloudWatchLogsConfiguration" }, "LogSchemaVersion": { @@ -92176,7 +92180,7 @@ }, "S3Configuration": { "$ref": "#/definitions/AWS::FIS::ExperimentTemplate.S3Configuration", - "markdownDescription": "The configuration for experiment logging to Amazon S3.", + "markdownDescription": "The configuration for experiment logging to Amazon S3 .", "title": "S3Configuration" } }, @@ -92217,7 +92221,7 @@ }, "Parameters": { "additionalProperties": true, - "markdownDescription": "The resource type parameters.", + "markdownDescription": "The 
parameters for the resource type.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } @@ -94018,7 +94022,7 @@ "type": "string" }, "SecurityStyle": { - "markdownDescription": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.", + "markdownDescription": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see the topic [What the security styles and their effects are](https://docs.netapp.com/us-en/ontap/nfs-admin/security-styles-their-effects-concept.html) in the NetApp Documentation Center.\n\nFor more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the FSx for ONTAP User Guide.", "title": "SecurityStyle", "type": "string" }, @@ -94028,7 +94032,7 @@ "type": "string" }, "SizeInMegabytes": { - "markdownDescription": "*This property has been deprecated. Use `SizeInBytes` .*\n\nSpecifies the size of the volume, in megabytes (MB), that you are creating.", + "markdownDescription": "Use `SizeInBytes` instead. 
Specifies the size of the volume, in megabytes (MB), that you are creating.", "title": "SizeInMegabytes", "type": "string" }, @@ -97829,7 +97833,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Indicates whether the CSV file contains custom data types.", "title": "ContainsCustomDatatype", "type": "array" }, @@ -97839,7 +97843,7 @@ "type": "string" }, "CustomDatatypeConfigured": { - "markdownDescription": "Enables the custom datatype to be configured.", + "markdownDescription": "Enables the configuration of custom data types.", "title": "CustomDatatypeConfigured", "type": "boolean" }, @@ -98304,7 +98308,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns used to exclude from the crawl. For more information, see [Catalog Tables with a Crawler](https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html) .", + "markdownDescription": "A list of glob patterns used to exclude from the crawl.", "title": "Exclusions", "type": "array" }, @@ -98317,7 +98321,7 @@ "items": { "type": "string" }, - "markdownDescription": "One or more Amazon S3 paths that contains Iceberg metadata folders as `s3://bucket/prefix` .", + "markdownDescription": "One or more Amazon S3 paths that contains Iceberg metadata folders as s3://bucket/prefix .", "title": "Paths", "type": "array" } @@ -98472,7 +98476,7 @@ "items": { "$ref": "#/definitions/AWS::Glue::Crawler.IcebergTarget" }, - "markdownDescription": "", + "markdownDescription": "Specifies Apache Iceberg data store targets.", "title": "IcebergTargets", "type": "array" }, @@ -98557,7 +98561,7 @@ "type": "string" }, "Tags": { - "markdownDescription": "", + "markdownDescription": "AWS tags that contain a key-value pair and may be searched by console, command line, or API.", "title": "Tags", "type": "object" } @@ -98907,7 +98911,7 @@ "type": "string" }, "Region": { - "markdownDescription": "Region of the target database.", + "markdownDescription": "The Region of the database.", "title": "Region", "type": "string" } @@ -100529,7 +100533,7 @@ }, "OpenTableFormatInput": { "$ref": "#/definitions/AWS::Glue::Table.OpenTableFormatInput", - "markdownDescription": "A structure representing an open format table.", + "markdownDescription": "Specifies an `OpenTableFormatInput` structure when creating an open format table.", "title": "OpenTableFormatInput" }, "TableInput": { @@ -100595,7 +100599,7 @@ "properties": { "MetadataOperation": { "$ref": "#/definitions/AWS::Glue::Table.MetadataOperation", - "markdownDescription": "A required metadata operation. Can only be set to `CREATE` .", + "markdownDescription": "A required metadata operation. Can only be set to CREATE.", "title": "MetadataOperation" }, "Version": { @@ -100831,7 +100835,7 @@ "type": "string" }, "Region": { - "markdownDescription": "Region of the target table.", + "markdownDescription": "The Region of the table.", "title": "Region", "type": "string" } @@ -105389,12 +105393,12 @@ "additionalProperties": false, "properties": { "ContactPostPassDurationSeconds": { - "markdownDescription": "Amount of time in seconds after a contact ends that you\u2019d like to receive a CloudWatch Event indicating the pass has finished. 
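As a minimal sketch of the Glue crawler Iceberg target documented above, assuming the `IcebergTarget` shape from this schema; the bucket path echoes the s3://bucket/prefix form in the description, and the exclusion glob is an illustrative assumption:

```json
"IcebergTargets": [
  {
    "Paths": ["s3://bucket/prefix"],
    "Exclusions": ["**/*.json"]
  }
]
```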
For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)", + "markdownDescription": "Amount of time in seconds after a contact ends that you\u2019d like to receive a Ground Station Contact State Change Event indicating the pass has finished.", "title": "ContactPostPassDurationSeconds", "type": "number" }, "ContactPrePassDurationSeconds": { - "markdownDescription": "Amount of time in seconds prior to contact start that you'd like to receive a CloudWatch Event indicating an upcoming pass. For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)", + "markdownDescription": "Amount of time in seconds prior to contact start that you'd like to receive a Ground Station Contact State Change Event indicating an upcoming pass.", "title": "ContactPrePassDurationSeconds", "type": "number" }, @@ -106102,6 +106106,8 @@ "type": "string" }, "MasterId": { + "markdownDescription": "The AWS account ID of the account designated as the GuardDuty administrator account.", + "title": "MasterId", "type": "string" } }, @@ -106183,6 +106189,8 @@ "type": "string" }, "MemberId": { + "markdownDescription": "The AWS account ID of the account to designate as a member.", + "title": "MemberId", "type": "string" }, "Message": { @@ -109742,17 +109750,17 @@ "additionalProperties": false, "properties": { "ContainerRecipeArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the container recipe that is used for this pipeline.", + "markdownDescription": "The Amazon Resource Name (ARN) of the container recipe that defines how images are configured and tested.", "title": "ContainerRecipeArn", "type": "string" }, "DistributionConfigurationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the distribution configuration.", + "markdownDescription": "The Amazon Resource Name (ARN) of the distribution configuration that defines and configures the outputs of your pipeline.", "title": "DistributionConfigurationArn", "type": "string" }, "EnhancedImageMetadataEnabled": { - "markdownDescription": "Indicates whether Image Builder collects additional information about the image, such as the operating system (OS) version and package list.", + "markdownDescription": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. 
Enabled by default.", "title": "EnhancedImageMetadataEnabled", "type": "boolean" }, @@ -109762,7 +109770,7 @@ "type": "string" }, "ImageRecipeArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the image recipe.", + "markdownDescription": "The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed.", "title": "ImageRecipeArn", "type": "string" }, @@ -109773,17 +109781,17 @@ }, "ImageTestsConfiguration": { "$ref": "#/definitions/AWS::ImageBuilder::Image.ImageTestsConfiguration", - "markdownDescription": "The configuration settings for your image test components, which includes a toggle that allows you to turn off tests, and a timeout setting.", + "markdownDescription": "The image tests configuration of the image.", "title": "ImageTestsConfiguration" }, "InfrastructureConfigurationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.", + "markdownDescription": "The Amazon Resource Name (ARN) of the infrastructure configuration that defines the environment in which your image will be built and tested.", "title": "InfrastructureConfigurationArn", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags that apply to this image.", + "markdownDescription": "The tags of the image.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -109796,7 +109804,7 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::Image.WorkflowConfiguration" }, - "markdownDescription": "Contains the build and test workflows that are associated with the image.", + "markdownDescription": "Contains an array of workflow configuration objects.", "title": "Workflows", "type": "array" } @@ -115528,7 +115536,7 @@ "additionalProperties": false, "properties": { "RuleName": { - "markdownDescription": "The name of the rule.", + "markdownDescription": "The name of the rule.\n\n*Pattern* : `[a-zA-Z0-9:_-]+`", "title": "RuleName", "type": "string" }, @@ -118772,7 +118780,7 @@ }, "Payload": { "$ref": "#/definitions/AWS::IoTEvents::AlarmModel.Payload", - "markdownDescription": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "markdownDescription": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "title": "Payload" }, "Separator": { @@ -119372,7 +119380,7 @@ }, "Payload": { "$ref": "#/definitions/AWS::IoTEvents::DetectorModel.Payload", - "markdownDescription": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "markdownDescription": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "title": "Payload" }, "Separator": { @@ -126589,7 +126597,7 @@ }, "S3Configuration": { "$ref": "#/definitions/AWS::Kendra::DataSource.S3DataSourceConfiguration", - "markdownDescription": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.", + "markdownDescription": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.\n\n> Amazon Kendra now supports an upgraded Amazon S3 connector.\n> \n> You must now use the [TemplateConfiguration](https://docs.aws.amazon.com/kendra/latest/APIReference/API_TemplateConfiguration.html) object instead of the `S3DataSourceConfiguration` object to configure your connector.\n> \n> Connectors 
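Pulling the `AWS::ImageBuilder::Image` properties above into one place, a minimal resource sketch might look like the following; the logical ID and both ARNs are illustrative placeholders, and `ImageTestsEnabled` / `TimeoutMinutes` are assumed to be the fields of the image tests configuration:

```json
"MyImage": {
  "Type": "AWS::ImageBuilder::Image",
  "Properties": {
    "ImageRecipeArn": "arn:aws:imagebuilder:us-east-1:123456789012:image-recipe/my-recipe/1.0.0",
    "InfrastructureConfigurationArn": "arn:aws:imagebuilder:us-east-1:123456789012:infrastructure-configuration/my-infra",
    "EnhancedImageMetadataEnabled": true,
    "ImageTestsConfiguration": { "ImageTestsEnabled": true, "TimeoutMinutes": 60 }
  }
}
```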
configured using the older console and API architecture will continue to function as configured. However, you won't be able to edit or update them. If you want to edit or update your connector configuration, you must create a new connector.\n> \n> We recommend migrating your connector workflow to the upgraded version. Support for connectors configured using the older architecture is scheduled to end by June 2024.", "title": "S3Configuration" }, "SalesforceConfiguration": { @@ -127030,7 +127038,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for file names and file types that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* will exclude all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** will exclude all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** will exclude all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -127038,7 +127046,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns for documents that should be indexed. 
If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed.\n\nSome [examples](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) are:\n\n- **.txt* will include all text files in a directory (files with the extension .txt).\n- ***/*.txt* will include all text files in a directory and its subdirectories.\n- **tax** will include all files in a directory that contain 'tax' in the file name, such as 'tax', 'taxes', 'income_tax'.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -138622,7 +138630,7 @@ }, "PrivateRegistryAccess": { "$ref": "#/definitions/AWS::Lightsail::Container.PrivateRegistryAccess", - "markdownDescription": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", + "markdownDescription": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/lightsail/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", "title": "PrivateRegistryAccess" }, "PublicDomainNames": { @@ -140878,7 +140886,7 @@ "additionalProperties": false, "properties": { "PolicyDocument": { - "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. 
For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.", + "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifier` array and an `Operation` property with an `Audit` action. The `DataIdentifier` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. 
This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifier` array and an `Operation` property with a `Deidentify` action. The `DataIdentifier` array must exactly match the `DataIdentifier` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifier` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.", "title": "PolicyDocument", "type": "string" }, @@ -141053,7 +141061,7 @@ "type": "object" }, "DestinationResourceArn": { - "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.", + "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.", "title": "DestinationResourceArn", "type": "string" }, @@ -157782,28 +157790,42 @@ "additionalProperties": false, "properties": { "DeletionProtection": { + "markdownDescription": "A value that indicates whether the graph has deletion protection enabled. 
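To make the two-block structure described in the `PolicyDocument` hunk above concrete, here is a minimal sketch of a data protection policy, assuming the managed data-identifier ARN format; the policy name, statement IDs, and the email-address identifier are illustrative. The two `DataIdentifier` arrays match exactly, the `Audit` operation carries a `FindingsDestination` object (empty here, since destinations are optional), and the `Deidentify` operation carries the required empty `MaskConfig` object:

```json
{
  "Name": "data-protection-policy",
  "Description": "Mask email addresses",
  "Version": "2021-06-01",
  "Statement": [
    {
      "Sid": "audit-policy",
      "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      "Operation": { "Audit": { "FindingsDestination": {} } }
    },
    {
      "Sid": "redact-policy",
      "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
      "Operation": { "Deidentify": { "MaskConfig": {} } }
    }
  ]
}
```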
The graph can't be deleted when deletion protection is enabled.", + "title": "DeletionProtection", "type": "boolean" }, "GraphName": { + "markdownDescription": "The graph name. For example: `my-graph-1` .\n\nThe name must contain from 1 to 63 letters, numbers, or hyphens, and its first character must be a letter. It cannot end with a hyphen or contain two consecutive hyphens.\n\nIf you don't specify a graph name, a unique graph name is generated for you using the prefix `graph-for` , followed by a combination of `Stack Name` and a `UUID` .", + "title": "GraphName", "type": "string" }, "ProvisionedMemory": { + "markdownDescription": "The provisioned memory-optimized Neptune Capacity Units (m-NCUs) to use for the graph.\n\nMin = 128", + "title": "ProvisionedMemory", "type": "number" }, "PublicConnectivity": { + "markdownDescription": "Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated.\n\nWhen the graph is publicly available, its domain name system (DNS) endpoint resolves to the public IP address from the internet. When the graph isn't publicly available, you need to create a `PrivateGraphEndpoint` in a given VPC to ensure the DNS name resolves to a private IP address that is reachable from the VPC.\n\nDefault: If not specified, the default value is false.\n\n> If enabling public connectivity for the first time, there will be a delay while it is enabled.", + "title": "PublicConnectivity", "type": "boolean" }, "ReplicaCount": { + "markdownDescription": "The number of replicas in other AZs.\n\nDefault: If not specified, the default value is 1.", + "title": "ReplicaCount", "type": "number" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, + "markdownDescription": "Adds metadata tags to the new graph. These tags can also be used with cost allocation reporting, or used in a Condition statement in an IAM policy.", + "title": "Tags", "type": "array" }, "VectorSearchConfiguration": { - "$ref": "#/definitions/AWS::NeptuneGraph::Graph.VectorSearchConfiguration" + "$ref": "#/definitions/AWS::NeptuneGraph::Graph.VectorSearchConfiguration", + "markdownDescription": "Specifies the number of dimensions for vector embeddings that will be loaded into the graph. The value is specified as `dimension=` value. 
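Putting the `AWS::NeptuneGraph::Graph` properties documented in the hunks above together, a minimal resource sketch might look like the following; the property names come from the schema itself, while the logical ID, the dimension value of 768, and the tag are illustrative assumptions:

```json
"MyGraph": {
  "Type": "AWS::NeptuneGraph::Graph",
  "Properties": {
    "GraphName": "my-graph-1",
    "ProvisionedMemory": 128,
    "PublicConnectivity": false,
    "ReplicaCount": 1,
    "DeletionProtection": true,
    "VectorSearchConfiguration": { "VectorSearchDimension": 768 },
    "Tags": [{ "Key": "team", "Value": "analytics" }]
  }
}
```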
Max = 65,535", + "title": "VectorSearchConfiguration" } }, "required": [ @@ -157836,6 +157858,8 @@ "additionalProperties": false, "properties": { "VectorSearchDimension": { + "markdownDescription": "The number of dimensions.", + "title": "VectorSearchDimension", "type": "number" } }, @@ -157880,21 +157904,29 @@ "additionalProperties": false, "properties": { "GraphIdentifier": { + "markdownDescription": "The unique identifier of the Neptune Analytics graph.", + "title": "GraphIdentifier", "type": "string" }, "SecurityGroupIds": { "items": { "type": "string" }, + "markdownDescription": "Security groups to be attached to the private graph endpoint.", + "title": "SecurityGroupIds", "type": "array" }, "SubnetIds": { "items": { "type": "string" }, + "markdownDescription": "Subnets in which private graph endpoint ENIs are created.", + "title": "SubnetIds", "type": "array" }, "VpcId": { + "markdownDescription": "The VPC in which the private graph endpoint needs to be created.", + "title": "VpcId", "type": "string" } }, @@ -158466,7 +158498,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -158476,7 +158508,7 @@ "type": "object" }, "LogDestinationType": { - "markdownDescription": "The type of storage destination to send these logs to. 
You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", "title": "LogDestinationType", "type": "string" }, @@ -163014,7 +163046,7 @@ "type": "string" }, "StandbyReplicas": { - "markdownDescription": "Indicates whether standby replicas should be used for a collection.", + "markdownDescription": "Indicates whether to use standby replicas for the collection. You can't update this property after the collection is already created. If you attempt to modify this property, the collection continues to use the original value.", "title": "StandbyReplicas", "type": "string" }, @@ -170335,7 +170367,7 @@ "type": "string" }, "DestinationStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "title": "DestinationStreamArn", "type": "string" }, @@ -172628,7 +172660,7 @@ "additionalProperties": false, "properties": { "DeliveryStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.", + "markdownDescription": "The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.", "title": "DeliveryStreamArn", "type": "string" } @@ -172738,7 +172770,7 @@ }, "FirehoseLogDestination": { "$ref": "#/definitions/AWS::Pipes::Pipe.FirehoseLogDestination", - "markdownDescription": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.", + "markdownDescription": "The Amazon Data Firehose logging configuration settings for the pipe.", "title": "FirehoseLogDestination" }, "IncludeExecutionData": { @@ -215195,7 +215227,7 @@ "type": "number" }, "CACertificateIdentifier": { - "markdownDescription": "The identifier of the CA certificate for this DB instance.\n\nSpecifying or updating this property triggers a reboot. For more information about CA certificate identifiers for RDS DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide* . 
For more information about CA certificate identifiers for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .", + "markdownDescription": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", "title": "CACertificateIdentifier", "type": "string" }, @@ -215230,12 +215262,12 @@ "type": "string" }, "DBClusterSnapshotIdentifier": { - "markdownDescription": "The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.\n- Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot.", + "markdownDescription": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", "title": "DBClusterSnapshotIdentifier", "type": "string" }, "DBInstanceClass": { - "markdownDescription": "The compute and memory capacity of the DB instance, for example, `db.m4.large` . Not all DB instance classes are available in all AWS Regions, or for all database engines.\n\nFor the full list of DB instance classes, and availability for your engine, see [DB Instance Class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide.* For more information about DB instance class pricing and AWS Region support for DB instance classes, see [Amazon RDS Pricing](https://docs.aws.amazon.com/rds/pricing/) .", + "markdownDescription": "The compute and memory capacity of the DB instance, for example `db.m5.large` . Not all DB instance classes are available in all AWS Regions , or for all database engines. 
For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "title": "DBInstanceClass", "type": "string" }, @@ -229956,7 +229988,7 @@ "type": "object" }, "SubscriptionRoleArn": { - "markdownDescription": "This property applies only to Amazon Kinesis Data Firehose delivery stream subscriptions. Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Kinesis Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Kinesis Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", + "markdownDescription": "This property applies only to Amazon Data Firehose delivery stream subscriptions. Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", "title": "SubscriptionRoleArn", "type": "string" }, @@ -230716,6 +230748,8 @@ "title": "OutputLocation" }, "Parameters": { + "markdownDescription": "The parameters for the runtime configuration of the document.", + "title": "Parameters", "type": "object" }, "ScheduleExpression": { @@ -230729,7 +230763,7 @@ "type": "number" }, "SyncCompliance": { - "markdownDescription": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.\n\nBy default, all associations use `AUTO` mode.", + "markdownDescription": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the `PutComplianceItems` API action. In this case, compliance data is not managed by State Manager. 
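As a sketch of the Firehose fanout that the `SubscriptionRoleArn` description above refers to, assuming the standard `AWS::SNS::Subscription` properties; the logical IDs and both ARNs are illustrative placeholders:

```json
"MySubscription": {
  "Type": "AWS::SNS::Subscription",
  "Properties": {
    "TopicArn": { "Ref": "MyTopic" },
    "Protocol": "firehose",
    "Endpoint": "arn:aws:firehose:us-east-1:123456789012:deliverystream/my-stream",
    "SubscriptionRoleArn": "arn:aws:iam::123456789012:role/sns-firehose-role"
  }
}
```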
It is managed by your direct call to the `PutComplianceItems` API action.\n\nBy default, all associations use `AUTO` mode.", "title": "SyncCompliance", "type": "string" }, @@ -230737,7 +230771,7 @@ "items": { "$ref": "#/definitions/AWS::SSM::Association.Target" }, - "markdownDescription": "The targets for the association. You must specify the `InstanceId` or `Targets` property. You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` . To view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", + "markdownDescription": "The targets for the association. You must specify the `InstanceId` or `Targets` property. You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` .\n\nSupported formats include the following.\n\n- `Key=InstanceIds,Values=,,`\n- `Key=tag-key,Values=,`\n\nTo view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", "title": "Targets", "type": "array" }, @@ -230877,7 +230911,7 @@ "type": "object" }, "DocumentFormat": { - "markdownDescription": "Specify the document format for the request. JSON is the default format.", + "markdownDescription": "Specify the document format for the request. `JSON` is the default format.", "title": "DocumentFormat", "type": "string" }, @@ -231070,7 +231104,7 @@ "type": "string" }, "StartDate": { - "markdownDescription": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. StartDate allows you to delay activation of the Maintenance Window until the specified future date.", + "markdownDescription": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. `StartDate` allows you to delay activation of the maintenance window until the specified future date.", "title": "StartDate", "type": "string" }, @@ -231421,7 +231455,7 @@ "type": "string" }, "Parameters": { - "markdownDescription": "The parameters for the AUTOMATION task.", + "markdownDescription": "The parameters for the `AUTOMATION` type task.", "title": "Parameters", "type": "object" } @@ -231638,7 +231672,7 @@ "additionalProperties": false, "properties": { "AllowedPattern": { - "markdownDescription": "A regular expression used to validate the parameter value. For example, for String types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", + "markdownDescription": "A regular expression used to validate the parameter value. For example, for `String` types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "title": "AllowedPattern", "type": "string" }, @@ -231653,7 +231687,7 @@ "type": "string" }, "Name": { - "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. 
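As a concrete illustration of the `Targets` format described in the `AWS::SSM::Association` hunk above, a minimal association that targets every instance in the account might look like the following; the `AWS-UpdateSSMAgent` document name is an illustrative choice of an AWS-managed document, and `SyncCompliance` uses the default `AUTO` mode from the description:

```json
"MyAssociation": {
  "Type": "AWS::SSM::Association",
  "Properties": {
    "Name": "AWS-UpdateSSMAgent",
    "SyncCompliance": "AUTO",
    "Targets": [{ "Key": "InstanceIds", "Values": ["*"] }]
  }
}
```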
For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", + "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", "title": "Name", "type": "string" }, @@ -231679,7 +231713,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of parameter.\n\n> Although `SecureString` is included in the list of valid values, AWS CloudFormation does *not* currently support creating a `SecureString` parameter type.", + "markdownDescription": "The type of parameter.", "title": "Type", "type": "string" }, @@ -232014,7 +232048,7 @@ "type": "string" }, "KMSKeyArn": { - "markdownDescription": "The ARN of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exist in the same region as the destination Amazon S3 bucket.", + "markdownDescription": "The Amazon Resource Name (ARN) of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exists in the same AWS Region as the destination Amazon S3 bucket.", "title": "KMSKeyArn", "type": "string" }, @@ -232200,7 +232234,7 @@ "type": "object" }, "ResourceArn": { - "markdownDescription": "Amazon Resource Name (ARN) of the resource to which you want to attach a policy.", + "markdownDescription": "The Amazon Resource Name (ARN) of the resource to which you want to attach a policy.", "title": "ResourceArn", "type": "string" } @@ -232286,7 +232320,7 @@ "type": "array" }, "Type": { - "markdownDescription": "Refers to the type of contact:\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule.", + "markdownDescription": "The type of contact.\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule.", "title": "Type", "type": "string" }, @@ -232977,7 +233011,7 @@ "additionalProperties": false, "properties": { "SseKmsKeyId": { - "markdownDescription": "The KMS key ID to use to encrypt your replication set.", + "markdownDescription": "The AWS Key Management Service key ID to use to encrypt your replication set.", "title": "SseKmsKeyId", "type": "string" } @@ -233136,7 +233170,7 @@ "items": { "type": "string" }, - "markdownDescription": "The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel by using the SNS topics", + "markdownDescription": "The Amazon SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. 
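Tying together the `AllowedPattern`, `Name`, and `Type` descriptions above, a minimal `AWS::SSM::Parameter` sketch might look like this; the logical ID and value are illustrative, the name reuses `ExampleParameterName` from the ARN example, and the doubled backslash is the JSON escaping of the `^\d+$` regex from the description:

```json
"MyParameter": {
  "Type": "AWS::SSM::Parameter",
  "Properties": {
    "Name": "ExampleParameterName",
    "Type": "String",
    "AllowedPattern": "^\\d+$",
    "Value": "42"
  }
}
```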
You can also make updates to the incident through the chat channel using the SNS topics.", + "markdownDescription": "The Amazon Simple Notification Service ( Amazon SNS ) targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the Amazon SNS topics.", "title": "NotificationTargets", "type": "array" }, @@ -233238,7 +233272,7 @@ "additionalProperties": false, "properties": { "SnsTopicArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic.", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SNS topic.", "title": "SnsTopicArn", "type": "string" } @@ -233294,7 +233328,7 @@ "type": "string" }, "DocumentVersion": { - "markdownDescription": "The automation document's version to use when running.", + "markdownDescription": "The version of the runbook to use when running.", "title": "DocumentVersion", "type": "string" }, @@ -233310,7 +233344,7 @@ "items": { "$ref": "#/definitions/AWS::SSMIncidents::ResponsePlan.SsmParameter" }, - "markdownDescription": "The key-value pair parameters to use when running the automation document.", + "markdownDescription": "The key-value pair parameters to use when running the runbook.", "title": "Parameters", "type": "array" }, @@ -233335,7 +233369,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key parameter to use when running the automation document.", + "markdownDescription": "The key parameter to use when running the Automation runbook.", "title": "Key", "type": "string" }, @@ -233343,7 +233377,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value parameter to use when running the automation document.", + "markdownDescription": "The value parameter to use when running the Automation runbook.", "title": "Values", "type": "array" } @@ -236429,7 +236463,7 @@ "additionalProperties": false, "properties": { "FeatureName": { - "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .", + "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "title": "FeatureName", "type": "string" }, @@ -240016,7 +240050,7 @@ "properties": { "Bias": { "$ref": "#/definitions/AWS::SageMaker::ModelPackage.Bias", - "markdownDescription": "Metrics that measure bais in a model.", + "markdownDescription": "Metrics that measure bias in a model.", "title": "Bias" }, "Explainability": { @@ -242985,7 +243019,7 @@ "additionalProperties": false, "properties": { "CognitoClientId": { - "markdownDescription": "An identifier for an application client. You must create the app client ID using Amazon Cognito.", + "markdownDescription": "An identifier for an application client. 
You must create the app client ID using Amazon Cognito .", "title": "CognitoClientId", "type": "string" }, @@ -255547,9 +255581,7 @@ "additionalProperties": false, "properties": { "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody", - "markdownDescription": "Redact the request body JSON.", - "title": "JsonBody" + "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" }, "Method": { "markdownDescription": "Redact the indicated HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", @@ -255607,18 +255639,12 @@ "additionalProperties": false, "properties": { "InvalidFallbackBehavior": { - "markdownDescription": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", - "title": "InvalidFallbackBehavior", "type": "string" }, "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern", - "markdownDescription": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", - "title": "MatchPattern" + "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" }, "MatchScope": { - "markdownDescription": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "title": "MatchScope", "type": "string" } }, @@ -255669,16 +255695,12 @@ "additionalProperties": false, "properties": { "All": { - "markdownDescription": "Match all of the elements.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", - "title": "All", "type": "object" }, "IncludedPaths": { "items": { "type": "string" }, - "markdownDescription": "Match only the specified include paths.\n\nProvide the include paths using JSON Pointer syntax. For example, `\"IncludedPaths\": [\"/dogs/0/name\", \"/dogs/1/name\"]` . 
For information about this syntax, see the Internet Engineering Task Force (IETF) documentation [JavaScript Object Notation (JSON) Pointer](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6901) .\n\nYou must specify either this setting or the `All` setting, but not both.\n\n> Don't use this option to include all paths. Instead, use the `All` setting.", - "title": "IncludedPaths", "type": "array" } }, @@ -257018,7 +257040,7 @@ }, "RateBasedStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.RateBasedStatement", - "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . 
You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. 
The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "title": "RateBasedStatement" }, "RegexMatchStatement": { @@ -258883,7 +258905,7 @@ }, "RateBasedStatement": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RateBasedStatement", - "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. 
You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . 
If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . 
You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "title": "RateBasedStatement" }, "RegexMatchStatement": { diff --git a/samtranslator/translator/arn_generator.py b/samtranslator/translator/arn_generator.py index 50c180604..a9f6e0325 100644 --- a/samtranslator/translator/arn_generator.py +++ b/samtranslator/translator/arn_generator.py @@ -25,6 +25,8 @@ def _region_to_partition(region: str) -> str: return "aws-iso-b" if region_string.startswith("us-gov"): return "aws-us-gov" + if region_string.startswith("eu-isoe"): + return "aws-iso-e" return "aws" diff --git a/schema_source/cloudformation-docs.json b/schema_source/cloudformation-docs.json index b22b27d64..64000d8da 100644 --- a/schema_source/cloudformation-docs.json +++ b/schema_source/cloudformation-docs.json @@ -203,28 +203,28 @@ "SourceAccount": "The ID of the account that assigned the permission." }, "AWS::APS::RuleGroupsNamespace": { - "Data": "The rules definition file for this namespace.", - "Name": "The name of the rule groups namespace. This property is required.", - "Tags": "A list of key and value pairs for the workspace resources.", - "Workspace": "The ARN of the workspace that contains this rule groups namespace." + "Data": "The rules file used in the namespace.\n\nFor more details about the rules file, see [Creating a rules file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-ruler-rulesfile.html) in the *Amazon Managed Service for Prometheus User Guide* .", + "Name": "The name of the rule groups namespace.", + "Tags": "The list of tag keys and values that are associated with the rule groups namespace.", + "Workspace": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics." }, "AWS::APS::RuleGroupsNamespace Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag. Must not begin with `aws:` .", + "Value": "The value of the tag." }, "AWS::APS::Workspace": { - "AlertManagerDefinition": "The alert manager definition for the workspace, as a string. For more information, see [Alert manager and templating](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alert-manager.html) .", - "Alias": "An alias that you assign to this workspace to help you identify it. It does not need to be unique.\n\nThe alias can be as many as 100 characters and can include any type of characters. 
Amazon Managed Service for Prometheus automatically strips any blank spaces from the beginning and end of the alias that you specify.", - "KmsKeyArn": "", - "LoggingConfiguration": "The LoggingConfiguration attribute is used to set the logging configuration for the workspace.", - "Tags": "A list of tag keys and values to associate with the workspace." + "AlertManagerDefinition": "The alert manager definition, a YAML configuration for the alert manager in your Amazon Managed Service for Prometheus workspace.\n\nFor details about the alert manager definition, see [Creating an alert manager configuration file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alertmanager-config.html) in the *Amazon Managed Service for Prometheus User Guide* .\n\nThe following example shows part of a CloudFormation YAML file with an embedded alert manager definition (following the `- |-` ).\n\n`Workspace: Type: AWS::APS::Workspace .... Properties: .... AlertManagerDefinition: Fn::Sub: - |- alertmanager_config: | templates: - 'default_template' route: receiver: example-sns receivers: - name: example-sns sns_configs: - topic_arn: 'arn:aws:sns:${AWS::Region}:${AWS::AccountId}:${TopicName}' -`", + "Alias": "The alias that is assigned to this workspace to help identify it. It does not need to be unique.", + "KmsKeyArn": "(optional) The ARN for a customer managed AWS KMS key to use for encrypting data within your workspace. For more information about using your own key in your workspace, see [Encryption at rest](https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) in the *Amazon Managed Service for Prometheus User Guide* .", + "LoggingConfiguration": "Contains information about the logging configuration for the workspace.", + "Tags": "The list of tag keys and values that are associated with the workspace." }, "AWS::APS::Workspace LoggingConfiguration": { - "LogGroupArn": "The Amazon Resource Name (ARN) of the CloudWatch log group the logs are emitted to." + "LogGroupArn": "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation." }, "AWS::APS::Workspace Tag": { - "Key": "", - "Value": "" + "Key": "The key of the tag. Must not begin with `aws:` .", + "Value": "The value of the tag." }, "AWS::ARCZonalShift::ZonalAutoshiftConfiguration": { "PracticeRunConfiguration": "A practice run configuration for a resource includes the Amazon CloudWatch alarms that you've specified for a practice run, as well as any blocked dates or blocked windows for the practice run. When a resource has a practice run configuration, Route 53 ARC shifts traffic for the resource weekly for practice runs.\n\nPractice runs are required for zonal autoshift. The zonal shifts that Route 53 ARC starts for practice runs help you to ensure that shifting away traffic from an Availability Zone during an autoshift is safe for your application.\n\nYou can update or delete a practice run configuration. Before you delete a practice run configuration, you must disable zonal autoshift for the resource. A practice run configuration is required when zonal autoshift is enabled.", @@ -354,49 +354,49 @@ "Revision": "The revision number of the configuration." }, "AWS::Amplify::App": { - "AccessToken": "The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App.
The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "AccessToken": "The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "AutoBranchCreationConfig": "Sets the configuration for your automatic branch creation.", "BasicAuthConfig": "The credentials for basic authorization for an Amplify app. You must base64-encode the authorization credentials and provide them in the format `user:password` .", - "BuildSpec": "The build specification (build spec) for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.\n\n*Pattern:* (?s).+", - "CustomHeaders": "The custom HTTP headers for an Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 25000.\n\n*Pattern:* (?s).*", + "BuildSpec": "The build specification (build spec) for an Amplify app.", + "CustomHeaders": "The custom HTTP headers for an Amplify app.", "CustomRules": "The custom rewrite and redirect rules for an Amplify app.", - "Description": "The description for an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "Description": "The description of the Amplify app.", "EnableBranchAutoDeletion": "Automatically disconnect a branch in Amplify Hosting when you delete a branch from your Git repository.", "EnvironmentVariables": "The environment variables map for an Amplify app.\n\nFor a list of the environment variables that are accessible to Amplify by default, see [Amplify Environment variables](https://docs.aws.amazon.com/amplify/latest/userguide/amplify-console-environment-variables.html) in the *Amplify Hosting User Guide* .", - "IAMServiceRole": "The AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) of the Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 1000.\n\n*Pattern:* (?s).*", - "Name": "The name for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", - "OauthToken": "The OAuth token for a third-party source control system for an Amplify app. 
The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "IAMServiceRole": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", + "Name": "The name of the Amplify app.", + "OauthToken": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "Platform": "The platform for the Amplify app. For a static app, set the platform type to `WEB` . For a dynamic server-side rendered (SSR) app, set the platform type to `WEB_COMPUTE` . For an app requiring Amplify Hosting's original SSR support only, set the platform type to `WEB_DYNAMIC` .", - "Repository": "The repository for an Amplify app.\n\n*Pattern:* (?s).*", + "Repository": "The Git repository for the Amplify app.", "Tags": "The tag for an Amplify app." }, "AWS::Amplify::App AutoBranchCreationConfig": { "AutoBranchCreationPatterns": "Automated branch creation glob patterns for the Amplify app.", "BasicAuthConfig": "Sets password protection for your auto created branch.", - "BuildSpec": "The build specification (build spec) for the autocreated branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.", + "BuildSpec": "The build specification (build spec) for the autocreated branch.", "EnableAutoBranchCreation": "Enables automated branch creation for the Amplify app.", "EnableAutoBuild": "Enables auto building for the auto created branch.", "EnablePerformanceMode": "Enables performance mode for the branch.\n\nPerformance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.", "EnablePullRequestPreview": "Sets whether pull request previews are enabled for each branch that Amplify Hosting automatically creates for your app. 
Amplify creates previews by deploying your app to a unique URL whenever a pull request is opened for the branch. Development and QA teams can use this preview to test the pull request before it's merged into a production or integration branch.\n\nTo provide backend support for your preview, Amplify Hosting automatically provisions a temporary backend environment that it deletes when the pull request is closed. If you want to specify a dedicated backend environment for your previews, use the `PullRequestEnvironmentName` property.\n\nFor more information, see [Web Previews](https://docs.aws.amazon.com/amplify/latest/userguide/pr-previews.html) in the *AWS Amplify Hosting User Guide* .", "EnvironmentVariables": "Environment variables for the auto created branch.", "Framework": "The framework for the autocreated branch.", - "PullRequestEnvironmentName": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", + "PullRequestEnvironmentName": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "Stage": "Stage for the auto created branch." }, "AWS::Amplify::App BasicAuthConfig": { "EnableBasicAuth": "Enables basic authorization for the Amplify app's branches.", - "Password": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", - "Username": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255." + "Password": "The password for basic authorization.", + "Username": "The user name for basic authorization." }, "AWS::Amplify::App CustomRule": { - "Condition": "The condition for a URL rewrite or redirect rule, such as a country code.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 2048.\n\n*Pattern:* (?s).*", - "Source": "The source pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1. 
Maximum length of 2048.\n\n*Pattern:* (?s).+", - "Status": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.\n\n*Length Constraints:* Minimum length of 3. Maximum length of 7.\n\n*Pattern:* .{3,7}", - "Target": "The target pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 2048.\n\n*Pattern:* (?s).+" + "Condition": "The condition for a URL rewrite or redirect rule, such as a country code.", + "Source": "The source pattern for a URL rewrite or redirect rule.", + "Status": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved permanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.", + "Target": "The target pattern for a URL rewrite or redirect rule." }, "AWS::Amplify::App EnvironmentVariable": { - "Name": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", - "Value": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*" + "Name": "", + "Value": "" }, "AWS::Amplify::App Tag": { "Key": "Specifies the key for the tag.", @@ -404,18 +404,18 @@ }, "AWS::Amplify::Branch": { "AppId": "The unique ID for an Amplify app.", - "Backend": "The backend environment for an Amplify app.", + "Backend": "Specifies the backend for a `Branch` of an Amplify app.", "BasicAuthConfig": "The basic authorization credentials for a branch of an Amplify app. You must base64-encode the authorization credentials and provide them in the format `user:password` .", - "BranchName": "The name for the branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", - "BuildSpec": "The build specification (build spec) for the branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.\n\n*Pattern:* (?s).+", - "Description": "The description for the branch that is part of an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "BranchName": "The name for the branch.", + "BuildSpec": "The build specification (build spec) for the branch.", + "Description": "The description for the branch that is part of an Amplify app.", "EnableAutoBuild": "Enables auto building for the branch.", "EnablePerformanceMode": "Enables performance mode for the branch.\n\nPerformance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out.", "EnablePullRequestPreview": "Specifies whether Amplify Hosting creates a preview for each pull request that is made for this branch. If this property is enabled, Amplify deploys your app to a unique preview URL after each pull request is opened.
Development and QA teams can use this preview to test the pull request before it's merged into a production or integration branch.\n\nTo provide backend support for your preview, Amplify automatically provisions a temporary backend environment that it deletes when the pull request is closed. If you want to specify a dedicated backend environment for your previews, use the `PullRequestEnvironmentName` property.\n\nFor more information, see [Web Previews](https://docs.aws.amazon.com/amplify/latest/userguide/pr-previews.html) in the *AWS Amplify Hosting User Guide* .", "EnvironmentVariables": "The environment variables for the branch.", "Framework": "The framework for the branch.", - "PullRequestEnvironmentName": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", - "Stage": "Describes the current stage for the branch.\n\n*Valid Values:* PRODUCTION | BETA | DEVELOPMENT | EXPERIMENTAL | PULL_REQUEST", + "PullRequestEnvironmentName": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", + "Stage": "Describes the current stage for the branch.", "Tags": "The tag for the branch." }, "AWS::Amplify::Branch Backend": { @@ -423,28 +423,40 @@ }, "AWS::Amplify::Branch BasicAuthConfig": { "EnableBasicAuth": "Enables basic authorization for the branch.", - "Password": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", - "Username": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255." 
+ "Password": "The password for basic authorization.", + "Username": "" }, "AWS::Amplify::Branch EnvironmentVariable": { - "Name": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", - "Value": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*" + "Name": "The environment variable name.", + "Value": "The environment variable value." }, "AWS::Amplify::Branch Tag": { "Key": "Specifies the key for the tag.", "Value": "Specifies the value for the tag." }, "AWS::Amplify::Domain": { - "AppId": "The unique ID for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 20.\n\n*Pattern:* d[a-z0-9]+", + "AppId": "The unique ID for an Amplify app.", "AutoSubDomainCreationPatterns": "Sets the branch patterns for automatic subdomain creation.", - "AutoSubDomainIAMRole": "The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* ^$|^arn:aws:iam::\\d{12}:role.+", - "DomainName": "The domain name for the domain association.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* ^(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])(\\.)?$", + "AutoSubDomainIAMRole": "The required AWS Identity and Access Management (IAMlong) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.", + "Certificate": "Describes the SSL/TLS certificate for the domain association. This can be your own custom certificate or the default certificate that Amplify provisions for you.\n\nIf you are updating your domain to use a different certificate, `Certificate` points to the new certificate that is being created instead of the current active certificate. Otherwise, `Certificate` points to the current active certificate.", + "CertificateSettings": "The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you.", + "DomainName": "The domain name for the domain association.", "EnableAutoSubDomain": "Enables the automated creation of subdomains for branches.", - "SubDomainSettings": "The setting for the subdomain." + "SubDomainSettings": "The setting for the subdomain.", + "UpdateStatus": "The status of the domain update operation that is currently in progress. The following list describes the valid update states.\n\n- **REQUESTING_CERTIFICATE** - The certificate is in the process of being updated.\n- **PENDING_VERIFICATION** - Indicates that an Amplify managed certificate is in the process of being verified. This occurs during the creation of a custom domain or when a custom domain is updated to use a managed certificate.\n- **IMPORTING_CUSTOM_CERTIFICATE** - Indicates that an Amplify custom certificate is in the process of being imported. This occurs during the creation of a custom domain or when a custom domain is updated to use a custom certificate.\n- **PENDING_DEPLOYMENT** - Indicates that the subdomain or certificate changes are being propagated.\n- **AWAITING_APP_CNAME** - Amplify is waiting for CNAME records corresponding to subdomains to be propagated. If your custom domain is on Route\u00a053, Amplify handles this for you automatically. 
For more information about custom domains, see [Setting up custom domains](https://docs.aws.amazon.com/amplify/latest/userguide/custom-domains.html) in the *Amplify Hosting User Guide* .\n- **UPDATE_COMPLETE** - The certificate has been associated with a domain.\n- **UPDATE_FAILED** - The certificate has failed to be provisioned or associated, and there is no existing active certificate to roll back to." + }, + "AWS::Amplify::Domain Certificate": { + "CertificateArn": "The Amazon resource name (ARN) for a custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` .", + "CertificateType": "The type of SSL/TLS certificate that you want to use.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "CertificateVerificationDNSRecord": "The DNS record for certificate verification." + }, + "AWS::Amplify::Domain CertificateSettings": { + "CertificateType": "The certificate type.\n\nSpecify `AMPLIFY_MANAGED` to use the default certificate that Amplify provisions for you.\n\nSpecify `CUSTOM` to use your own certificate that you have already added to AWS Certificate Manager in your AWS account . Make sure you request (or import) the certificate in the US East (N. Virginia) Region (us-east-1). For more information about using ACM, see [Importing certificates into AWS Certificate Manager](https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) in the *ACM User guide* .", + "CustomCertificateArn": "The Amazon resource name (ARN) for the custom certificate that you have already added to AWS Certificate Manager in your AWS account .\n\nThis field is required only when the certificate type is `CUSTOM` ." }, "AWS::Amplify::Domain SubDomainSetting": { "BranchName": "The branch name setting for the subdomain.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", - "Prefix": "The prefix setting for the subdomain.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*" + "Prefix": "The prefix setting for the subdomain." }, "AWS::AmplifyUIBuilder::Component": { "AppId": "The unique ID of the Amplify app associated with the component.", @@ -1062,7 +1074,7 @@ }, "AWS::ApiGatewayV2::ApiGatewayManagedOverrides IntegrationOverrides": { "Description": "The description of the integration.", - "IntegrationMethod": "Specifies the integration's HTTP method type.", + "IntegrationMethod": "Specifies the integration's HTTP method type. For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "PayloadFormatVersion": "Specifies the format of the payload sent to an integration. Required for HTTP APIs. For HTTP APIs, supported values for Lambda proxy integrations are `1.0` and `2.0` . For all other integrations, `1.0` is the only supported value. 
To learn more, see [Working with AWS Lambda proxy integrations for HTTP APIs](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html) .", "TimeoutInMillis": "Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and between 50 and 30,000 milliseconds for HTTP APIs. The default timeout is 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs." }, @@ -1140,7 +1152,7 @@ "ContentHandlingStrategy": "Supported only for WebSocket APIs. Specifies how to handle response payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT` , with the following behaviors:\n\n`CONVERT_TO_BINARY` : Converts a response payload from a Base64-encoded string to the corresponding binary blob.\n\n`CONVERT_TO_TEXT` : Converts a response payload from a binary blob to a Base64-encoded string.\n\nIf this property is not defined, the response payload will be passed through from the integration response to the route response or method response without modification.", "CredentialsArn": "Specifies the credentials required for the integration, if any. For AWS integrations, three options are available. To specify an IAM Role for API Gateway to assume, use the role's Amazon Resource Name (ARN). To require that the caller's identity be passed through from the request, specify the string `arn:aws:iam::*:user/*` . To use resource-based permissions on supported AWS services, don't specify this parameter.", "Description": "The description of the integration.", - "IntegrationMethod": "Specifies the integration's HTTP method type.", + "IntegrationMethod": "Specifies the integration's HTTP method type. For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "IntegrationSubtype": "Supported only for HTTP API `AWS_PROXY` integrations. Specifies the AWS service action to invoke. To learn more, see [Integration subtype reference](https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-aws-services-reference.html) .", "IntegrationType": "The integration type of an integration. One of the following:\n\n`AWS` : for integrating the route or method request with an AWS service action, including the Lambda function-invoking action. With the Lambda function-invoking action, this is referred to as the Lambda custom integration. With any other AWS service action, this is known as AWS integration. Supported only for WebSocket APIs.\n\n`AWS_PROXY` : for integrating the route or method request with a Lambda function or other AWS service action. This integration is also referred to as a Lambda proxy integration.\n\n`HTTP` : for integrating the route or method request with an HTTP endpoint. This integration is also referred to as the HTTP custom integration. Supported only for WebSocket APIs.\n\n`HTTP_PROXY` : for integrating the route or method request with an HTTP endpoint, with the client request passed through as-is. This is also referred to as HTTP proxy integration. For HTTP API private integrations, use an `HTTP_PROXY` integration.\n\n`MOCK` : for integrating the route or method request with API Gateway as a \"loopback\" endpoint without invoking any backend. 
Supported only for WebSocket APIs.", "IntegrationUri": "For a Lambda integration, specify the URI of a Lambda function.\n\nFor an HTTP integration, specify a fully-qualified URL.\n\nFor an HTTP API private integration, specify the ARN of an Application Load Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. If you specify the ARN of an AWS Cloud Map service, API Gateway uses `DiscoverInstances` to identify resources. You can use query parameters to target specific resources. To learn more, see [DiscoverInstances](https://docs.aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html) . For private integrations, all resources must be owned by the same AWS account .", @@ -2822,18 +2834,18 @@ "AWS::AppStream::Fleet": { "ComputeCapacity": "The desired capacity for the fleet. This is not allowed for Elastic fleets.", "Description": "The description to display.", - "DisconnectTimeoutInSeconds": "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 360000.", + "DisconnectTimeoutInSeconds": "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 36000.", "DisplayName": "The fleet name to display.", "DomainJoinInfo": "The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. This is not allowed for Elastic fleets.", "EnableDefaultInternetAccess": "Enables or disables default internet access for the fleet.", "FleetType": "The fleet type.\n\n- **ALWAYS_ON** - Provides users with instant-on access to their apps. You are charged for all running instances in your fleet, even if no users are streaming apps.\n- **ON_DEMAND** - Provide users with access to applications after they connect, which takes one to two minutes. You are charged for instance streaming when users are connected and a small hourly fee for instances that are not streaming apps.\n- **ELASTIC** - The pool of streaming instances is managed by Amazon AppStream 2.0. When a user selects their application or desktop to launch, they will start streaming after the app block has been downloaded and mounted to a streaming instance.\n\n*Allowed Values* : `ALWAYS_ON` | `ELASTIC` | `ON_DEMAND`", "IamRoleArn": "The ARN of the IAM role that is applied to the fleet. To assume a role, the fleet instance calls the AWS Security Token Service `AssumeRole` API operation and passes the ARN of the role to use. The operation creates a new session with temporary credentials. 
AppStream 2.0 retrieves the temporary credentials and creates the *appstream_machine_role* credential profile on the instance.\n\nFor more information, see [Using an IAM Role to Grant Permissions to Applications and Scripts Running on AppStream 2.0 Streaming Instances](https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) in the *Amazon AppStream 2.0 Administration Guide* .", - "IdleDisconnectTimeoutInSeconds": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", + "IdleDisconnectTimeoutInSeconds": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", "ImageArn": "The ARN of the public, private, or shared image to use.", "ImageName": "The name of the image used to create the fleet.", "InstanceType": "The instance type to use when launching fleet instances. 
The following instance types are available for non-Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium\n- stream.standard.large\n- stream.compute.large\n- stream.compute.xlarge\n- stream.compute.2xlarge\n- stream.compute.4xlarge\n- stream.compute.8xlarge\n- stream.memory.large\n- stream.memory.xlarge\n- stream.memory.2xlarge\n- stream.memory.4xlarge\n- stream.memory.8xlarge\n- stream.memory.z1d.large\n- stream.memory.z1d.xlarge\n- stream.memory.z1d.2xlarge\n- stream.memory.z1d.3xlarge\n- stream.memory.z1d.6xlarge\n- stream.memory.z1d.12xlarge\n- stream.graphics-design.large\n- stream.graphics-design.xlarge\n- stream.graphics-design.2xlarge\n- stream.graphics-design.4xlarge\n- stream.graphics-desktop.2xlarge\n- stream.graphics.g4dn.xlarge\n- stream.graphics.g4dn.2xlarge\n- stream.graphics.g4dn.4xlarge\n- stream.graphics.g4dn.8xlarge\n- stream.graphics.g4dn.12xlarge\n- stream.graphics.g4dn.16xlarge\n- stream.graphics-pro.4xlarge\n- stream.graphics-pro.8xlarge\n- stream.graphics-pro.16xlarge\n\nThe following instance types are available for Elastic fleets:\n\n- stream.standard.small\n- stream.standard.medium", "MaxConcurrentSessions": "The maximum number of concurrent sessions that can be run on an Elastic fleet. This setting is required for Elastic fleets, but is not used for other fleet types.", - "MaxSessionsPerInstance": "The maximum number of user sessions on an instance. This only applies to multi-session fleets.", + "MaxSessionsPerInstance": "The maximum number of user sessions on an instance. This is applicable only for multi-session fleets.", "MaxUserDurationInSeconds": "The maximum amount of time that a streaming session can remain active, in seconds. If users are still connected to a streaming instance five minutes before this limit is reached, they are prompted to save any open documents before being disconnected. After this time elapses, the instance is terminated and replaced by a new instance.\n\nSpecify a value between 600 and 432000.", "Name": "A unique name for the fleet.", "Platform": "The platform of the fleet. Platform is a required setting for Elastic fleets, and is not used for other fleet types.", @@ -2845,7 +2857,7 @@ }, "AWS::AppStream::Fleet ComputeCapacity": { "DesiredInstances": "The desired number of streaming instances.", - "DesiredSessions": "The desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must set either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank." + "DesiredSessions": "The desired capacity, in terms of the number of user sessions, for a multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must define either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank."
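The DesiredInstances/DesiredSessions rule just described is easy to violate in a hand-written template. Below is a minimal sketch of a pre-deployment check, assuming the ComputeCapacity properties are held in a plain Python dict; the helper name and error message are illustrative, not part of the schema:

```python
# Hedged sketch (not part of this diff): enforce the documented rule that a
# ComputeCapacity block defines exactly one of DesiredInstances (single-session
# fleets) or DesiredSessions (multi-session fleets) -- never both, never neither.
def validate_compute_capacity(capacity: dict) -> None:
    has_instances = "DesiredInstances" in capacity
    has_sessions = "DesiredSessions" in capacity
    if has_instances == has_sessions:  # both set, or both missing
        raise ValueError(
            "ComputeCapacity must define either DesiredInstances or "
            "DesiredSessions, but not both and not neither."
        )

validate_compute_capacity({"DesiredInstances": 2})   # passes
validate_compute_capacity({"DesiredSessions": 10})   # passes (multi-session fleet)
# validate_compute_capacity({})                      # would raise ValueError
```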
}, "AWS::AppStream::Fleet DomainJoinInfo": { "DirectoryName": "The fully qualified name of the directory (for example, corp.example.com).", @@ -2932,7 +2944,7 @@ }, "AWS::AppStream::Stack UserSetting": { "Action": "The action that is enabled or disabled.", - "MaximumLength": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` . This can't be specified when the permission is `DISABLED` .\n\nThis can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets.\n\nThe value can be between 1 and 20,971,520 (20 MB).", + "MaximumLength": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` . This can't be specified when the permission is `DISABLED` .\n\nThe value can be between 1 and 20,971,520 (20 MB).", "Permission": "Indicates whether the action is enabled or disabled." }, "AWS::AppStream::StackFleetAssociation": { @@ -2956,6 +2968,7 @@ "ApiCachingBehavior": "Caching behavior.\n\n- *FULL_REQUEST_CACHING* : All requests are fully cached.\n- *PER_RESOLVER_CACHING* : Individual resolvers that you specify are cached.", "ApiId": "The GraphQL API ID.", "AtRestEncryptionEnabled": "At-rest encryption flag for cache. You cannot update this setting after creation.", + "HealthMetricsConfig": "Controls how cache health metrics will be emitted to CloudWatch. Cache health metrics include:\n\n- *NetworkBandwidthOutAllowanceExceeded* : The network packets dropped because the throughput exceeded the aggregated bandwidth limit. This is useful for diagnosing bottlenecks in a cache configuration.\n- *EngineCPUUtilization* : The CPU utilization (percentage) allocated to the Redis process. This is useful for diagnosing bottlenecks in a cache configuration.\n\nMetrics will be recorded by API ID. You can set the value to `ENABLED` or `DISABLED` .", "TransitEncryptionEnabled": "Transit encryption flag when connecting to cache. You cannot update this setting after creation.", "Ttl": "TTL in seconds for cache entries.\n\nValid values are 1\u20133,600 seconds.", "Type": "The cache instance type. Valid values are\n\n- `SMALL`\n- `MEDIUM`\n- `LARGE`\n- `XLARGE`\n- `LARGE_2X`\n- `LARGE_4X`\n- `LARGE_8X` (not available in all regions)\n- `LARGE_12X`\n\nHistorically, instance types were identified by an EC2-style value. As of July 2020, this is deprecated, and the generic identifiers above should be used.\n\nThe following legacy instance types are available, but their use is discouraged:\n\n- *T2_SMALL* : A t2.small instance type.\n- *T2_MEDIUM* : A t2.medium instance type.\n- *R4_LARGE* : A r4.large instance type.\n- *R4_XLARGE* : A r4.xlarge instance type.\n- *R4_2XLARGE* : A r4.2xlarge instance type.\n- *R4_4XLARGE* : A r4.4xlarge instance type.\n- *R4_8XLARGE* : A r4.8xlarge instance type." 
@@ -2973,6 +2986,7 @@ "EventBridgeConfig": "An EventBridge configuration that contains a valid ARN of an event bus.", "HttpConfig": "Endpoints for an HTTP data source.", "LambdaConfig": "An ARN of a Lambda function in valid ARN format. This can be the ARN of a Lambda function that exists in the current account or in another account.", + "MetricsConfig": "Enables or disables enhanced data source metrics for specified data sources. Note that `MetricsConfig` won't be used unless the `dataSourceLevelMetricsBehavior` value is set to `PER_DATA_SOURCE_METRICS` . If the `dataSourceLevelMetricsBehavior` is set to `FULL_REQUEST_DATA_SOURCE_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.\n\n`MetricsConfig` can be `ENABLED` or `DISABLED` .", "Name": "Friendly name for you to identify your AppSync data source after creation.", "OpenSearchServiceConfig": "AWS Region and Endpoints for an Amazon OpenSearch Service domain in your account.", "RelationalDatabaseConfig": "Relational Database configuration of the relational database data source.", @@ -3069,7 +3083,8 @@ "AdditionalAuthenticationProviders": "A list of additional authentication providers for the `GraphqlApi` API.", "ApiType": "The value that indicates whether the GraphQL API is a standard API ( `GRAPHQL` ) or merged API ( `MERGED` ).\n\n*WARNING* : If the `ApiType` has not been defined, *explicitly* setting it to `GRAPHQL` in a template/stack update will result in an API replacement and new DNS values.\n\nThe following values are valid:\n\n`GRAPHQL | MERGED`", "AuthenticationType": "Security configuration for your GraphQL API. For allowed values (such as `API_KEY` , `AWS_IAM` , `AMAZON_COGNITO_USER_POOLS` , `OPENID_CONNECT` , or `AWS_LAMBDA` ), see [Security](https://docs.aws.amazon.com/appsync/latest/devguide/security.html) in the *AWS AppSync Developer Guide* .", - "EnvironmentVariables": "A map containing the list of resources with their properties and environment variables. For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .", + "EnhancedMetricsConfig": "Enables and controls the enhanced metrics feature. Enhanced metrics emit granular data on API usage and performance such as AppSync request and error counts, latency, and cache hits/misses. All enhanced metric data is sent to your CloudWatch account, and you can configure the types of data that will be sent.\n\nEnhanced metrics can be configured at the resolver, data source, and operation levels. For more information, see [Monitoring and logging](https://docs.aws.amazon.com//appsync/latest/devguide/monitoring.html#cw-metrics) in the *AWS AppSync User Guide* .", + "EnvironmentVariables": "A map containing the list of resources with their properties and environment variables. For more information, see [Environmental variables](https://docs.aws.amazon.com/appsync/latest/devguide/environmental-variables.html) .\n\n*Pattern* : `^[A-Za-z]+\\\\w*$\\\\`\n\n*Minimum* : 2\n\n*Maximum* : 64", "IntrospectionConfig": "Sets the value of the GraphQL API to enable ( `ENABLED` ) or disable ( `DISABLED` ) introspection. If no value is provided, the introspection configuration will be set to `ENABLED` by default. 
This field will produce an error if the operation attempts to use the introspection feature while this field is disabled.\n\nFor more information about introspection, see [GraphQL introspection](https://docs.aws.amazon.com/https://graphql.org/learn/introspection/) .", "LambdaAuthorizerConfig": "A `LambdaAuthorizerConfig` holds configuration on how to authorize AWS AppSync API access when using the `AWS_LAMBDA` authorizer mode. Be aware that an AWS AppSync API may have only one Lambda authorizer configured at a time.", "LogConfig": "The Amazon CloudWatch Logs configuration.", @@ -3095,6 +3110,11 @@ "AwsRegion": "The AWS Region in which the user pool was created.", "UserPoolId": "The user pool ID." }, + "AWS::AppSync::GraphQLApi EnhancedMetricsConfig": { + "DataSourceLevelMetricsBehavior": "Controls how data source metrics will be emitted to CloudWatch. Data source metrics include:\n\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a data source invocation.\n- *Errors* : The number of errors that occurred during a data source invocation.\n\nThese metrics can be emitted to CloudWatch per data source or for all data sources in the request. Metrics will be recorded by API ID and data source name. `dataSourceLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_DATA_SOURCE_METRICS` : Records and emits metric data for all data sources in the request.\n- `PER_DATA_SOURCE_METRICS` : Records and emits metric data for data sources that have the `MetricsConfig` value set to `ENABLED` .", + "OperationLevelMetricsConfig": "Controls how operation metrics will be emitted to CloudWatch. Operation metrics include:\n\n- *Requests* : The number of times a specified GraphQL operation was called.\n- *GraphQL errors* : The number of GraphQL errors that occurred during a specified GraphQL operation.\n\nMetrics will be recorded by API ID and operation name. You can set the value to `ENABLED` or `DISABLED` .", + "ResolverLevelMetricsBehavior": "Controls how resolver metrics will be emitted to CloudWatch. Resolver metrics include:\n\n- *GraphQL errors* : The number of GraphQL errors that occurred.\n- *Requests* : The number of invocations that occurred during a request.\n- *Latency* : The time to complete a resolver invocation.\n- *Cache hits* : The number of cache hits during a request.\n- *Cache misses* : The number of cache misses during a request.\n\nThese metrics can be emitted to CloudWatch per resolver or for all resolvers in the request. Metrics will be recorded by API ID and resolver name. `resolverLevelMetricsBehavior` accepts one of these values at a time:\n\n- `FULL_REQUEST_RESOLVER_METRICS` : Records and emits metric data for all resolvers in the request.\n- `PER_RESOLVER_METRICS` : Records and emits metric data for resolvers that have the `MetricsConfig` value set to `ENABLED` ." + }, "AWS::AppSync::GraphQLApi LambdaAuthorizerConfig": { "AuthorizerResultTtlInSeconds": "The number of seconds a response should be cached for. The default is 0 seconds, which disables caching. If you don't specify a value for `authorizerResultTtlInSeconds` , the default value is used. The maximum value is one hour (3600 seconds). The Lambda function can override this by returning a `ttlOverride` key in its response.", "AuthorizerUri": "The ARN of the Lambda function to be called for authorization. 
This may be a standard Lambda ARN, a version ARN ( `.../v3` ) or alias ARN.\n\n*Note* : This Lambda function must have the following resource-based policy assigned to it. When configuring Lambda authorizers in the console, this is done for you. To do so with the AWS CLI , run the following:\n\n`aws lambda add-permission --function-name \"arn:aws:lambda:us-east-2:111122223333:function:my-function\" --statement-id \"appsync\" --principal appsync.amazonaws.com --action lambda:InvokeFunction`", @@ -3135,6 +3155,7 @@ "FieldName": "The GraphQL field on a type that invokes the resolver.", "Kind": "The resolver type.\n\n- *UNIT* : A UNIT resolver type. A UNIT resolver is the default resolver type. You can use a UNIT resolver to run a GraphQL query against a single data source.\n- *PIPELINE* : A PIPELINE resolver type. You can use a PIPELINE resolver to invoke a series of `Function` objects in a serial manner. You can use a pipeline resolver to run a GraphQL query against multiple data sources.", "MaxBatchSize": "The maximum number of resolver request inputs that will be sent to a single AWS Lambda function in a `BatchInvoke` operation.", + "MetricsConfig": "Enables or disables enhanced resolver metrics for specified resolvers. Note that `MetricsConfig` won't be used unless the `resolverLevelMetricsBehavior` value is set to `PER_RESOLVER_METRICS` . If the `resolverLevelMetricsBehavior` is set to `FULL_REQUEST_RESOLVER_METRICS` instead, `MetricsConfig` will be ignored. However, you can still set its value.", "PipelineConfig": "Functions linked with the pipeline resolver.", "RequestMappingTemplate": "The request mapping template.\n\nRequest mapping templates are optional when using a Lambda data source. For all other data sources, a request mapping template is required.", "RequestMappingTemplateS3Location": "The location of a request mapping template in an Amazon S3 bucket. Use this if you want to provision with a template file in Amazon S3 rather than embedding it in your CloudFormation template.", @@ -3564,14 +3585,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included.\n\n- For current generation instance types, specify `current` . The current generation includes EC2 instance types currently recommended for use. This typically includes the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide for Linux Instances* .\n- For previous generation instance types, specify `previous` .\n\nDefault: Any current or previous generation", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nDefault: `included`", "LocalStorageTypes": "Indicates the type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: Any local storage type", - "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. 
The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` .", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU for an instance type, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum instance memory size for an instance type, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of network bandwidth, in gigabits per second (Gbps).\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces for an instance type.\n\nDefault: No minimum or maximum limits", "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per instance price.\n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must provide On-Demand Instance hibernation support.\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. 
To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "TotalLocalStorageGB": "The minimum and maximum total local storage size for an instance type, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs for an instance type." }, @@ -3934,7 +3955,6 @@ "EdiType": "Returns the details for the EDI standard that is being used for the transformer. Currently, only X12 is supported. X12 is a set of standards and corresponding messages that define specific business documents.", "FileFormat": "Returns that the currently supported file formats for EDI transformations are `JSON` and `XML` .", "MappingTemplate": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", - "ModifiedAt": "Returns a timestamp representing the date and time for the most recent change for the transformer object.", "Name": "Returns the descriptive name for the transformer.", "SampleDocument": "Returns a sample EDI document that is used by a transformer as a guide for processing the EDI data.", "Status": "Returns the state of the newly created transformer. The transformer can be either `active` or `inactive` . For the transformer to be used in a capability, its status must be `active` .", @@ -3972,7 +3992,7 @@ "RecoveryPointTags": "To help organize your resources, you can assign your own metadata to the resources that you create. Each tag is a key-value pair.", "RuleName": "A display name for a backup rule.", "ScheduleExpression": "A CRON expression specifying when AWS Backup initiates a backup job.", - "ScheduleExpressionTimezone": "", + "ScheduleExpressionTimezone": "This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", "StartWindowMinutes": "An optional value that specifies a period of time in minutes after a backup is scheduled before a job is canceled if it doesn't start successfully.\n\nIf this value is included, it must be at least 60 minutes to avoid errors.", "TargetBackupVault": "The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the AWS Region where they are created. They consist of letters, numbers, and hyphens." }, @@ -4042,7 +4062,7 @@ "AWS::Backup::Framework ControlScope": { "ComplianceResourceIds": "The ID of the only AWS resource that you want your control scope to contain.", "ComplianceResourceTypes": "Describes whether the control scope includes one or more types of resources, such as `EFS` or `RDS` .", - "Tags": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string. The structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` ." + "Tags": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string if you are creating or editing a framework from the console (though the value can be an empty string when included in a CloudFormation template).\n\nThe structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` ." 
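The tag structure called out above ( `[{\"Key\":\"string\",\"Value\":\"string\"}]` ) is easiest to read in template form. A hedged YAML sketch of a framework control scoped by one tag; the control name is a real AWS Backup control, but the tag key and value are purely illustrative:

```yaml
# Sketch only: scopes a control to RDS resources carrying one specific tag.
FrameworkControls:
  - ControlName: BACKUP_RESOURCES_PROTECTED_BY_BACKUP_PLAN   # illustrative choice of control
    ControlScope:
      ComplianceResourceTypes:
        - RDS
      Tags:
        - Key: backup-tier      # at most one key-value pair is allowed
          Value: critical       # may be an empty string only in a CloudFormation template
```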
}, "AWS::Backup::Framework FrameworkControl": { "ControlInputParameters": "A list of `ParameterName` and `ParameterValue` pairs.", @@ -4074,7 +4094,7 @@ }, "AWS::Backup::ReportPlan Tag": { "Key": "", - "Value": "" + "Value": "The value of the key.\n\nLength Constraints: Maximum length of 256.\n\nPattern: `^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$`" }, "AWS::Backup::RestoreTestingPlan": { "RecoveryPointSelection": "The specified criteria to assign a set of resources, such as recovery point types or backup vaults.", @@ -4082,7 +4102,7 @@ "ScheduleExpression": "A CRON expression in specified timezone when a restore testing plan is executed.", "ScheduleExpressionTimezone": "Optional. This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", "StartWindowHours": "Defaults to 24 hours.\n\nA value in hours after a restore test is scheduled before a job will be canceled if it doesn't start successfully. This value is optional. If this value is included, this parameter has a maximum value of 168 hours (one week).", - "Tags": "" + "Tags": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters,numbers, spaces, and the following characters: `+ - = . _ : /.`" }, "AWS::Backup::RestoreTestingPlan RestoreTestingRecoveryPointSelection": { "Algorithm": "Acceptable values include \"LATEST_WITHIN_WINDOW\" or \"RANDOM_WITHIN_WINDOW\"", @@ -4142,8 +4162,8 @@ "AllocationStrategy": "The allocation strategy to use for the compute resource if not enough instances of the best fitting instance type can be allocated. This might be because of availability of the instance type in the Region or [Amazon EC2 service limits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) . For more information, see [Allocation strategies](https://docs.aws.amazon.com/batch/latest/userguide/allocation-strategies.html) in the *AWS Batch User Guide* .\n\nWhen updating a compute environment, changing the allocation strategy requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . `BEST_FIT` is not supported when updating a compute environment.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified. \n\n- **BEST_FIT (default)** - AWS Batch selects an instance type that best fits the needs of the jobs with a preference for the lowest-cost instance type. If additional instances of the selected instance type aren't available, AWS Batch waits for the additional instances to be available. If there aren't enough instances available, or if the user is reaching [Amazon EC2 service limits](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html) then additional jobs aren't run until the currently running jobs have completed. This allocation strategy keeps costs lower but can limit scaling. If you are using Spot Fleets with `BEST_FIT` then the Spot Fleet IAM role must be specified.\n- **BEST_FIT_PROGRESSIVE** - AWS Batch will select additional instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types with a lower cost per unit vCPU. 
If additional instances of the previously selected instance types aren't available, AWS Batch will select new instance types.\n- **SPOT_CAPACITY_OPTIMIZED** - AWS Batch will select one or more instance types that are large enough to meet the requirements of the jobs in the queue, with a preference for instance types that are less likely to be interrupted. This allocation strategy is only available for Spot Instance compute resources.\n- **SPOT_PRICE_CAPACITY_OPTIMIZED** - The price and capacity optimized allocation strategy looks at both price and capacity to select the Spot Instance pools that are the least likely to be interrupted and have the lowest possible price. This allocation strategy is only available for Spot Instance compute resources.\n\n> We recommend that you use `SPOT_PRICE_CAPACITY_OPTIMIZED` rather than `SPOT_CAPACITY_OPTIMIZED` in most instances.\n\nWith `BEST_FIT_PROGRESSIVE` , `SPOT_CAPACITY_OPTIMIZED` , and `SPOT_PRICE_CAPACITY_OPTIMIZED` allocation strategies using On-Demand or Spot Instances, and the `BEST_FIT` strategy using Spot Instances, AWS Batch might need to go above `maxvCpus` to meet your capacity requirements. In this event, AWS Batch never exceeds `maxvCpus` by more than a single instance.", "BidPercentage": "The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that instance type before instances are launched. For example, if your maximum percentage is 20%, the Spot price must be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and never more than your maximum percentage. For most use cases, we recommend leaving this field empty.\n\nWhen updating a compute environment, changing the bid percentage requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "DesiredvCpus": "The desired number of vCPUs in the compute environment. AWS Batch modifies this value between the minimum and maximum values based on job queue demand.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. > AWS Batch doesn't support changing the desired number of vCPUs of an existing compute environment. Don't specify this parameter for compute environments using Amazon EKS clusters. > When you update the `desiredvCpus` setting, the value must be between the `minvCpus` and `maxvCpus` values.\n> \n> Additionally, the updated `desiredvCpus` value must be greater than or equal to the current `desiredvCpus` value. For more information, see [Troubleshooting AWS Batch](https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#error-desired-vcpus-update) in the *AWS Batch User Guide* .", - "Ec2Configuration": "Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . 
To remove the EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", - "Ec2KeyPair": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the EC2 key pair requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "Ec2Configuration": "Provides information used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . To remove the Amazon EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "Ec2KeyPair": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the Amazon EC2 key pair requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "ImageId": "The Amazon Machine Image (AMI) ID used for instances launched in the compute environment. This parameter is overridden by the `imageIdOverride` member of the `Ec2Configuration` structure. To remove the custom AMI ID and use the default AMI ID, set this value to an empty string.\n\nWhen updating a compute environment, changing the AMI ID requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. > The AMI that you choose for a compute environment must match the architecture of the instance types that you intend to use for that compute environment. For example, if your compute environment uses A1 instance types, the compute resource AMI that you choose must support ARM instances. Amazon ECS vends both x86 and ARM versions of the Amazon ECS-optimized Amazon Linux 2 AMI. 
For more information, see [Amazon ECS-optimized Amazon Linux 2 AMI](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#ecs-optimized-ami-linux-variants.html) in the *Amazon Elastic Container Service Developer Guide* .", "InstanceRole": "The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. Required for Amazon EC2 instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, `*ecsInstanceRole*` or `arn:aws:iam:: ** :instance-profile/ *ecsInstanceRole*` . For more information, see [Amazon ECS instance role](https://docs.aws.amazon.com/batch/latest/userguide/instance_IAM_role.html) in the *AWS Batch User Guide* .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "InstanceTypes": "The instance types that can be launched. You can specify instance families to launch any instance type within those families (for example, `c5` or `p3` ), or you can specify specific sizes within a family (such as `c5.8xlarge` ). You can also choose `optimal` to select instance types (from the C4, M4, and R4 instance families) that match the demand of your job queues.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. > When you create a compute environment, the instance types that you select for the compute environment must share the same architecture. For example, you can't mix x86 and ARM instances in the same compute environment. > Currently, `optimal` uses instance types from the C4, M4, and R4 instance families. In Regions that don't have instance types from those instance families, instance types from the C5, M5, and R5 instance families are used.", @@ -4151,10 +4171,10 @@ "MaxvCpus": "The maximum number of Amazon EC2 vCPUs that an environment can reach.\n\n> With `BEST_FIT_PROGRESSIVE` , `SPOT_CAPACITY_OPTIMIZED` and `SPOT_PRICE_CAPACITY_OPTIMIZED` (recommended) strategies using On-Demand or Spot Instances, and the `BEST_FIT` strategy using Spot Instances, AWS Batch might need to exceed `maxvCpus` to meet your capacity requirements. In this event, AWS Batch never exceeds `maxvCpus` by more than a single instance.", "MinvCpus": "The minimum number of vCPUs that an environment should maintain (even if the compute environment is `DISABLED` ).\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "PlacementGroup": "The Amazon EC2 placement group to associate with your compute resources. If you intend to submit multi-node parallel jobs to your compute environment, you should consider creating a cluster placement group and associate it with your compute resources. This keeps your multi-node parallel job on a logical grouping of instances within a single Availability Zone with high network flow potential. 
For more information, see [Placement groups](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) in the *Amazon EC2 User Guide for Linux Instances* .\n\nWhen updating a compute environment, changing the placement group requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", - "SecurityGroupIds": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", + "SecurityGroupIds": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the Amazon EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", "SpotIamFleetRole": "The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a `SPOT` compute environment. This role is required if the allocation strategy is set to `BEST_FIT` or if the allocation strategy isn't specified. For more information, see [Amazon EC2 spot fleet role](https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. > To tag your Spot Instances on creation, the Spot Fleet IAM role specified here must use the newer *AmazonEC2SpotFleetTaggingRole* managed policy. The previously recommended *AmazonEC2SpotFleetRole* managed policy doesn't have the required permissions to tag Spot Instances. For more information, see [Spot instances not tagged on creation](https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#spot-instance-no-tag) in the *AWS Batch User Guide* .", - "Subnets": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. 
For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", - "Tags": "Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value-for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "Subnets": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. 
For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", + "Tags": "Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute environment. For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value, for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "Type": "The type of compute environment: `EC2` , `SPOT` , `FARGATE` , or `FARGATE_SPOT` . For more information, see [Compute environments](https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html) in the *AWS Batch User Guide* .\n\nIf you choose `SPOT` , you must also specify an Amazon EC2 Spot Fleet role with the `spotIamFleetRole` parameter. For more information, see [Amazon EC2 spot fleet role](https://docs.aws.amazon.com/batch/latest/userguide/spot_fleet_IAM_role.html) in the *AWS Batch User Guide* .\n\nWhen updating a compute environment, changing the type of a compute environment requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\nWhen updating the type of a compute environment, changing between `EC2` and `SPOT` or between `FARGATE` and `FARGATE_SPOT` will initiate an infrastructure update, but if you switch between `EC2` and `FARGATE` , AWS CloudFormation will create a new compute environment.", "UpdateToLatestImageVersion": "Specifies whether the AMI ID is updated to the latest one that's supported by AWS Batch when the compute environment has an infrastructure update. The default value is `false` .\n\n> An AMI ID can either be specified in the `imageId` or `imageIdOverride` parameters or be determined by the launch template that's specified in the `launchTemplate` parameter. If an AMI ID is specified any of these ways, this parameter is ignored. For more information about how to update AMI IDs during an infrastructure update, see [Updating the AMI ID](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html#updating-compute-environments-ami) in the *AWS Batch User Guide* . \n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. 
For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* ." }, @@ -4177,10 +4197,10 @@ "TerminateJobsOnUpdate": "Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. The default value is `false` ." }, "AWS::Batch::JobDefinition": { - "ContainerProperties": "An object with various properties specific to Amazon ECS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", - "EksProperties": "An object with various properties that are specific to Amazon EKS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", + "ContainerProperties": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", + "EksProperties": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", "JobDefinitionName": "The name of the job definition.", - "NodeProperties": "An object with various properties that are specific to multi-node parallel jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", + "NodeProperties": "An object with properties that are specific to multi-node parallel jobs. When `nodeProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `eksProperties` .\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", "Parameters": "Default parameters or parameter substitution placeholders that are set in the job definition. Parameters are specified as a key-value pair mapping. Parameters in a `SubmitJob` request override any corresponding parameter defaults from the job definition. For more information about specifying parameters, see [Job definition parameters](https://docs.aws.amazon.com/batch/latest/userguide/job_definition_parameters.html) in the *AWS Batch User Guide* .", "PlatformCapabilities": "The platform capabilities required by the job definition. If no value is specified, it defaults to `EC2` . Jobs run on Fargate resources specify `FARGATE` .", "PropagateTags": "Specifies whether to propagate the tags from the job or job definition to the corresponding Amazon ECS task. If no value is specified, the tags aren't propagated. Tags can only be propagated to the tasks when the tasks are created. For tags with the same name, job tags are given priority over job definition tags. If the total number of combined tags from the job and job definition is over 50, the job is moved to the `FAILED` state.", @@ -4188,7 +4208,7 @@ "SchedulingPriority": "The scheduling priority of the job definition. This only affects jobs in job queues with a fair share policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower scheduling priority.", "Tags": "The tags that are applied to the job definition.", "Timeout": "The timeout time for jobs that are submitted with this job definition. 
After the amount of time you specify passes, AWS Batch terminates your jobs if they aren't finished.", - "Type": "The type of job definition. For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n> If the job is run on Fargate resources, then `multinode` isn't supported." + "Type": "The type of job definition. For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n- If the value is `container` , then one of the following is required: `containerProperties` , `ecsProperties` , or `eksProperties` .\n- If the value is `multinode` , then `nodeProperties` is required.\n\n> If the job is run on Fargate resources, then `multinode` isn't supported." }, "AWS::Batch::JobDefinition AuthorizationConfig": { "AccessPointId": "The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the `EFSVolumeConfiguration` must either be omitted or set to `/` which enforces the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the `EFSVolumeConfiguration` . For more information, see [Working with Amazon EFS access points](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) in the *Amazon Elastic File System User Guide* .", @@ -4199,23 +4219,24 @@ "Environment": "The environment variables to pass to a container. This parameter maps to `Env` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--env` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> We don't recommend using plaintext environment variables for sensitive information, such as credential data. > Environment variables cannot start with \" `AWS_BATCH` \". This naming convention is reserved for variables that AWS Batch sets.", "EphemeralStorage": "The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on AWS Fargate .", "ExecutionRoleArn": "The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see [AWS Batch execution IAM role](https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html) in the *AWS Batch User Guide* .", - "FargatePlatformConfiguration": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "FargatePlatformConfiguration": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "Image": "Required. The image used to start a container. This string is passed directly to the Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are specified with `*repository-url* / *image* : *tag*` . It can be 255 characters long. 
It can contain uppercase and lowercase letters, numbers, hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to `Image` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `IMAGE` parameter of [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> Docker image architecture must match the processor architecture of the compute resources that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based compute resources. \n\n- Images in Amazon ECR Public repositories use the full `registry/repository[:tag]` or `registry/repository[@digest]` naming conventions. For example, `public.ecr.aws/ *registry_alias* / *my-web-app* : *latest*` .\n- Images in Amazon ECR repositories use the full registry and repository URI (for example, `123456789012.dkr.ecr..amazonaws.com/` ).\n- Images in official repositories on Docker Hub use a single name (for example, `ubuntu` or `mongo` ).\n- Images in other repositories on Docker Hub are qualified with an organization name (for example, `amazon/amazon-ecs-agent` ).\n- Images in other online repositories are qualified further by a domain name (for example, `quay.io/assemblyline/ubuntu` ).", "InstanceType": "The instance type to use for a multi-node parallel job. All node groups in a multi-node parallel job must use the same instance type.\n\n> This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.", "JobRoleArn": "The Amazon Resource Name (ARN) of the IAM role that the container can assume for AWS permissions. For more information, see [IAM roles for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) in the *Amazon Elastic Container Service Developer Guide* .", "LinuxParameters": "Linux-specific modifications that are applied to the container, such as details for device mappings.", - "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. 
To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", - "Memory": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.", + "LogConfiguration": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html) data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "Memory": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. 
It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.", "MountPoints": "The mount points for data volumes in your container. This parameter maps to `Volumes` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--volume` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", - "NetworkConfiguration": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "NetworkConfiguration": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "Privileged": "When this parameter is true, the container is given elevated permissions on the host container instance (similar to the `root` user). This parameter maps to `Privileged` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--privileged` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The default value is false.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided, or specified as false.", "ReadonlyRootFilesystem": "When this parameter is true, the container is given read-only access to its root file system. This parameter maps to `ReadonlyRootfs` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--read-only` option to `docker run` .", + "RepositoryCredentials": "The private repository authentication credentials to use.", "ResourceRequirements": "The type and amount of resources to assign to a container. The supported resources include `GPU` , `MEMORY` , and `VCPU` .", "RuntimePlatform": "An object that represents the compute environment architecture for AWS Batch jobs on Fargate.", "Secrets": "The secrets for the container. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html) in the *AWS Batch User Guide* .", "Ulimits": "A list of `ulimits` to set in the container. This parameter maps to `Ulimits` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--ulimit` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.", "User": "The user name to use inside the container. 
This parameter maps to `User` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--user` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .", - "Vcpus": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", + "Vcpus": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on Amazon EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", "Volumes": "A list of data volumes used in a job." }, "AWS::Batch::JobDefinition Device": { @@ -4231,7 +4252,7 @@ "TransitEncryptionPort": "The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see [EFS mount helper](https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) in the *Amazon Elastic File System User Guide* ." }, "AWS::Batch::JobDefinition EksContainer": { - "Args": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. 
For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. For more information, see [CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) in the *Dockerfile reference* and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", + "Args": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", "Command": "The entrypoint for the container. This isn't run within a shell. If this isn't specified, the `ENTRYPOINT` of the container image is used. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` and the resulting string isn't expanded. For example, `$$(VAR_NAME)` will be passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. The entrypoint can't be updated. For more information, see [ENTRYPOINT](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#entrypoint) in the *Dockerfile reference* and [Define a command and arguments for a container](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) and [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) in the *Kubernetes documentation* .", "Env": "The environment variables to pass to a container.\n\n> Environment variables cannot start with \" `AWS_BATCH` \". This naming convention is reserved for variables that AWS Batch sets.", "Image": "The Docker image used to start the container.", @@ -4311,7 +4332,7 @@ "SecretOptions": "The secrets to pass to the log configuration. For more information, see [Specifying sensitive data](https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html) in the *AWS Batch User Guide* ." 
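To make the `LogConfiguration` description above concrete, here is a minimal, illustrative CloudFormation sketch of a Batch job definition that sets a log driver and resolves one secret through `SecretOptions`. It is not part of this schema change, and the resource name, log group, image, and ARNs are placeholders:

```yaml
# Illustrative sketch only; names, image, and ARNs are placeholders.
BatchLoggingJobDefinition:
  Type: AWS::Batch::JobDefinition
  Properties:
    Type: container
    ContainerProperties:
      Image: public.ecr.aws/amazonlinux/amazonlinux:latest
      Command: ["echo", "hello"]
      # An execution role is needed so the agent can fetch the secret below.
      ExecutionRoleArn: arn:aws:iam::123456789012:role/batchExecutionRole
      ResourceRequirements:
        - Type: VCPU
          Value: "1"
        - Type: MEMORY
          Value: "2048"
      LogConfiguration:
        LogDriver: awslogs            # must be a driver AWS Batch supports
        Options:
          awslogs-group: /aws/batch/example
        SecretOptions:
          - Name: LOG_API_TOKEN
            ValueFrom: arn:aws:secretsmanager:us-east-1:123456789012:secret:example-token
```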
}, "AWS::Batch::JobDefinition Metadata": { - "Labels": "" + "Labels": "Key-value pairs used to identify, sort, and organize cube resources. Can contain up to 63 uppercase letters, lowercase letters, numbers, hyphens (-), and underscores (_). Labels can be added or modified at any time. Each resource can have multiple labels, but each key must be unique for a given object." }, "AWS::Batch::JobDefinition MountPoints": { "ContainerPath": "The path on the container where the host volume is mounted.", @@ -4331,16 +4352,19 @@ "TargetNodes": "The range of nodes, using node index values. A range of `0:3` indicates nodes with index values of `0` through `3` . If the starting range value is omitted ( `:n` ), then `0` is used to start the range. If the ending range value is omitted ( `n:` ), then the highest possible node index is used to end the range. Your accumulative node ranges must account for all nodes ( `0:n` ). You can nest node ranges (for example, `0:10` and `4:5` ). In this case, the `4:5` range properties override the `0:10` properties." }, "AWS::Batch::JobDefinition PodProperties": { - "Containers": "", - "DnsPolicy": "", - "HostNetwork": "", - "Metadata": "", - "ServiceAccountName": "", - "Volumes": "" + "Containers": "The properties of the container that's used on the Amazon EKS pod.", + "DnsPolicy": "The DNS policy for the pod. The default value is `ClusterFirst` . If the `hostNetwork` parameter is not specified, the default is `ClusterFirstWithHostNet` . `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see [Pod's DNS policy](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) in the *Kubernetes documentation* .\n\nValid values: `Default` | `ClusterFirst` | `ClusterFirstWithHostNet`", + "HostNetwork": "Indicates if the pod uses the hosts' network IP address. The default value is `true` . Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. For more information, see [Host namespaces](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/security/pod-security-policy/#host-namespaces) and [Pod networking](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/workloads/pods/#pod-networking) in the *Kubernetes documentation* .", + "Metadata": "Metadata about the Kubernetes pod. For more information, see [Understanding Kubernetes Objects](https://docs.aws.amazon.com/https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/) in the *Kubernetes documentation* .", + "ServiceAccountName": "The name of the service account that's used to run the pod. For more information, see [Kubernetes service accounts](https://docs.aws.amazon.com/eks/latest/userguide/service-accounts.html) and [Configure a Kubernetes service account to assume an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) in the *Amazon EKS User Guide* and [Configure service accounts for pods](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) in the *Kubernetes documentation* .", + "Volumes": "Specifies the volumes for a job definition that uses Amazon EKS resources." 
+ }, + "AWS::Batch::JobDefinition RepositoryCredentials": { + "CredentialsParameter": "The Amazon Resource Name (ARN) of the secret containing the private repository credentials." }, "AWS::Batch::JobDefinition ResourceRequirement": { "Type": "The type of resource to assign to a container. The supported resources include `GPU` , `MEMORY` , and `VCPU` .", - "Value": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . \n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. 
This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880" + "Value": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on Amazon EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. 
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . \n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For Amazon EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. 
The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16.\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880"
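As a quick illustration of the Fargate vCPU/memory pairing rules listed above (a sketch, not part of the diff; names and the role ARN are placeholders), `VCPU` = 0.5 may be combined with `MEMORY` = 2048:

```yaml
# Illustrative sketch only; names and the role ARN are placeholders.
FargateJobDefinition:
  Type: AWS::Batch::JobDefinition
  Properties:
    Type: container
    PlatformCapabilities:
      - FARGATE
    ContainerProperties:
      Image: public.ecr.aws/amazonlinux/amazonlinux:latest
      Command: ["echo", "hello"]
      ExecutionRoleArn: arn:aws:iam::123456789012:role/ecsTaskExecutionRole
      NetworkConfiguration:
        AssignPublicIp: ENABLED
      ResourceRequirements:
        - Type: VCPU                    # Fargate allows 0.25, 0.5, 1, 2, 4, 8, or 16
          Value: "0.5"
        - Type: MEMORY                  # must be a value allowed for 0.5 vCPU
          Value: "2048"
```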
For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", + "HttpVersion": "(Optional) Specify the HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", "IPV6Enabled": "If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your distribution, specify `true` . If you specify `false` , CloudFront responds to IPv6 DNS requests with the DNS response code `NOERROR` and with no IP addresses. This allows viewers to submit a second request, for an IPv4 address for your distribution.\n\nIn general, you should enable IPv6 if you have users on IPv6 networks who want to access your content. However, if you're using signed URLs or signed cookies to restrict access to your content, and if you're using a custom policy that includes the `IpAddress` parameter to restrict the IP addresses that can access your content, don't enable IPv6. If you want to restrict access to some content by IP address and not restrict access to other content (or restrict access but not by IP address), you can create two distributions. For more information, see [Creating a Signed URL Using a Custom Policy](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) in the *Amazon CloudFront Developer Guide* .\n\nIf you're using an Amazon Route\u00a053 AWS Integration alias resource record set to route traffic to your CloudFront distribution, you need to create a second alias resource record set when both of the following are true:\n\n- You enable IPv6 for the distribution\n- You're using alternate domain names in the URLs for your objects\n\nFor more information, see [Routing Traffic to an Amazon CloudFront Web Distribution by Using Your Domain Name](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) in the *Amazon Route\u00a053 AWS Integration Developer Guide* .\n\nIf you created a CNAME resource record set, either with Amazon Route\u00a053 AWS Integration or with another DNS service, you don't need to make any changes. 
A CNAME record will route traffic to your distribution regardless of the IP address format of the viewer request.", "Logging": "A complex type that controls whether access logs are written for the distribution.\n\nFor more information about logging, see [Access Logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) in the *Amazon CloudFront Developer Guide* .", "OriginGroups": "A complex type that contains information about origin groups for this distribution.", @@ -5344,7 +5368,7 @@ "GeoRestriction": "A complex type that controls the countries in which your content is distributed. CloudFront determines the location of your users using `MaxMind` GeoIP databases. To disable geo restriction, remove the [Restrictions](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudfront-distribution-distributionconfig.html#cfn-cloudfront-distribution-distributionconfig-restrictions) property from your stack template." }, "AWS::CloudFront::Distribution S3OriginConfig": { - "OriginAccessIdentity": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n\norigin-access-identity/cloudfront/ *ID-of-origin-access-identity*\n\nwhere `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* ." + "OriginAccessIdentity": "> If you're using origin access control (OAC) instead of origin access identity, specify an empty `OriginAccessIdentity` element. For more information, see [Restricting access to an Amazon S3 origin](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-origin.html) in the *Amazon CloudFront Developer Guide* . \n\nThe CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront.
The format of the value is:\n\n`origin-access-identity/cloudfront/ID-of-origin-access-identity`\n\nThe `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* ." }, "AWS::CloudFront::Distribution StatusCodes": { "Items": "The items (status codes) for an origin group.", @@ -5614,7 +5638,7 @@ "AWS::CloudTrail::EventDataStore AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/:guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following 
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -5656,7 +5680,7 @@ "AWS::CloudTrail::Trail AdvancedFieldSelector": { "EndsWith": "An operator that includes events that match the last few characters of the event record field specified as the value of `Field` .", "Equals": "An operator that includes events that match the exact value of the event record field specified as the value of `Field` . This is the only valid operator that you can use with the `readOnly` , `eventCategory` , and `resources.type` fields.", - "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this field, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This field is required for CloudTrail data events.
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` field per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it.
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "Field": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to filter in or filter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this field, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This field is required for CloudTrail data events.
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` field per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've specified in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it.
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "NotEndsWith": "An operator that excludes events that match the last few characters of the event record field specified as the value of `Field` .", "NotEquals": "An operator that excludes events that match the exact value of the event record field specified as the value of `Field` .", "NotStartsWith": "An operator that excludes events that match the first few characters of the event record field specified as the value of `Field` .", @@ -5684,7 +5708,7 @@ "AlarmActions": "The list of actions to execute when this alarm transitions into an ALARM state from any other state. Specify each action as an Amazon Resource Name (ARN). For more information about creating alarms and the actions that you can specify, see [PutMetricAlarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) in the *Amazon CloudWatch API Reference* .", "AlarmDescription": "The description of the alarm.", "AlarmName": "The name of the alarm. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the alarm name.\n\n> If you specify a name, you cannot perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", - "ComparisonOperator": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.\n\nYou can specify the following values: `GreaterThanThreshold` , `GreaterThanOrEqualToThreshold` , `LessThanThreshold` , or `LessThanOrEqualToThreshold` .", + "ComparisonOperator": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.", "DatapointsToAlarm": "The number of datapoints that must be breaching to trigger the alarm. This is used only if you are setting an \"M out of N\" alarm. In that case, this value is the M, and the value that you set for `EvaluationPeriods` is the N value. For more information, see [Evaluating an Alarm](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#alarm-evaluation) in the *Amazon CloudWatch User Guide* .\n\nIf you omit this parameter, CloudWatch uses the same value here that you set for `EvaluationPeriods` , and the alarm goes to alarm state if that many consecutive periods are breaching.", "Dimensions": "The dimensions for the metric associated with the alarm. For an alarm based on a math expression, you can't specify `Dimensions` . Instead, you use `Metrics` .", "EvaluateLowSampleCountPercentile": "Used only for alarms based on percentiles. If `ignore` , the alarm state does not change during periods with too few data points to be statistically significant. 
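The `AdvancedFieldSelector` rules above are easier to follow in a concrete template. A minimal sketch, with hypothetical trail and bucket names, that logs only write events on objects in one S3 bucket; per the description, `eventCategory` and `resources.type` use `Equals`, and the bucket ARN keeps its intentional trailing slash with `StartsWith`:

```yaml
ExampleTrail:
  Type: AWS::CloudTrail::Trail
  Properties:
    IsLogging: true
    S3BucketName: example-trail-log-bucket      # hypothetical delivery bucket
    AdvancedEventSelectors:
      - Name: S3WriteEventsForOneBucket
        FieldSelectors:
          - Field: eventCategory                # must use Equals
            Equals: ["Data"]
          - Field: resources.type               # must use Equals
            Equals: ["AWS::S3::Object"]
          - Field: readOnly
            Equals: ["false"]                   # write events only
          - Field: resources.ARN                # trailing slash is intentional
            StartsWith: ["arn:aws:s3:::example-data-bucket/"]
```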
If `evaluate` or this parameter is not used, the alarm is always evaluated and possibly changes state no matter how many data points are available.", @@ -5999,7 +6023,7 @@ "AWS::CodeBuild::Project WebhookFilter": { "ExcludeMatchedPattern": "Used to indicate that the `pattern` determines which webhook events do not trigger a build. If true, then a webhook event that does not match the `pattern` triggers a build. If false, then a webhook event that matches the `pattern` triggers a build.", "Pattern": "For a `WebHookFilter` that uses `EVENT` type, a comma-separated string that specifies one or more events. For example, the webhook filter `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` allows all push, pull request created, and pull request updated events to trigger a build.\n\nFor a `WebHookFilter` that uses any of the other filter types, a regular expression pattern. For example, a `WebHookFilter` that uses `HEAD_REF` for its `type` and the pattern `^refs/heads/` triggers a build when the head reference is a branch with a reference name `refs/heads/branch-name` .", - "Type": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- **EVENT** - A webhook event triggers a build when the provided `pattern` matches one of five event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- **ACTOR_ACCOUNT_ID** - A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- **HEAD_REF** - A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- **BASE_REF** - A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- **FILE_PATH** - A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- **COMMIT_MESSAGE** - A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events." + "Type": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- **EVENT** - A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string.
For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- **ACTOR_ACCOUNT_ID** - A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- **HEAD_REF** - A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- **BASE_REF** - A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- **FILE_PATH** - A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- **COMMIT_MESSAGE** - A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events." }, "AWS::CodeBuild::ReportGroup": { "DeleteReports": "When deleting a report group, specifies if reports within the report group should be deleted.\n\n- **true** - Deletes any reports that belong to the report group before deleting the report group.\n- **false** - You must delete any reports in the report group. This is the default value. If you delete a report group that contains one or more reports, an exception is thrown.", @@ -6296,6 +6320,7 @@ "ArtifactStore": "The S3 bucket where artifacts for the pipeline are stored.\n\n> You must include either `artifactStore` or `artifactStores` in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use `artifactStores` .", "ArtifactStores": "A mapping of `artifactStore` objects and their corresponding AWS Regions. There must be an artifact store for the pipeline Region and for each cross-region action in the pipeline.\n\n> You must include either `artifactStore` or `artifactStores` in your pipeline, but you cannot use both. If you create a cross-region action in your pipeline, you must use `artifactStores` .", "DisableInboundStageTransitions": "Represents the input of a `DisableStageTransition` action.", + "ExecutionMode": "The method that the pipeline will use to handle multiple executions. The default mode is SUPERSEDED.", "Name": "The name of the pipeline.", "PipelineType": "CodePipeline provides the following pipeline types, which differ in characteristics and price, so that you can tailor your pipeline features and cost to the needs of your applications.\n\n- V1 type pipelines have a JSON structure that contains standard pipeline, stage, and action-level parameters.\n- V2 type pipelines have the same structure as a V1 type, along with additional parameters for release safety and trigger configuration.\n\n> Including V2 parameters, such as triggers on Git tags, in the pipeline JSON when creating or updating a pipeline will result in the pipeline having the V2 type of pipeline and the associated costs.
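As a sketch of how the `WebhookFilter` types above compose (filters within a group are combined, so all must match), assuming a hypothetical GitHub-backed project:

```yaml
# Fragment of an AWS::CodeBuild::Project: build on pushes and new pull
# requests whose head ref is a release branch.
Triggers:
  Webhook: true
  FilterGroups:
    - - Type: EVENT
        Pattern: PUSH, PULL_REQUEST_CREATED   # comma-separated event list
      - Type: HEAD_REF
        Pattern: ^refs/heads/release/.*       # regular expression
```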
\n\nFor information about pricing for CodePipeline, see [Pricing](https://docs.aws.amazon.com/codepipeline/pricing/) .\n\nFor information about which type of pipeline to choose, see [What type of pipeline is right for me?](https://docs.aws.amazon.com/codepipeline/latest/userguide/pipeline-types-planning.html) .", "RestartExecutionOnUpdate": "Indicates whether to rerun the CodePipeline pipeline after you update it.", @@ -6339,11 +6364,27 @@ "Id": "The ID used to identify the key. For an AWS KMS key, you can use the key ID, the key ARN, or the alias ARN.\n\n> Aliases are recognized only in the account that created the AWS KMS key. For cross-account actions, you can only use the key ID or key ARN to identify the key. Cross-account actions involve using the role from the other account (AccountB), so specifying the key ID will use the key from the other account (AccountB).", "Type": "The type of encryption key, such as an AWS KMS key. When creating or updating a pipeline, the value must be set to 'KMS'." }, + "AWS::CodePipeline::Pipeline GitBranchFilterCriteria": { + "Excludes": "The list of patterns of Git branches that, when a commit is pushed, are to be excluded from starting the pipeline.", + "Includes": "The list of patterns of Git branches that, when a commit is pushed, are to be included as criteria that starts the pipeline." + }, "AWS::CodePipeline::Pipeline GitConfiguration": { + "PullRequest": "The field where the repository event that will start the pipeline is specified as pull requests.", "Push": "The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details.", "SourceActionName": "The name of the pipeline source action where the trigger configuration, such as Git tags, is specified. The trigger configuration will start the pipeline upon the specified change only.\n\n> You can only specify one trigger configuration per source action." }, + "AWS::CodePipeline::Pipeline GitFilePathFilterCriteria": { + "Excludes": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be excluded from starting the pipeline.", + "Includes": "The list of patterns of Git repository file paths that, when a commit is pushed, are to be included as criteria that starts the pipeline." + }, + "AWS::CodePipeline::Pipeline GitPullRequestFilter": { + "Branches": "The field that specifies to filter on branches for the pull request trigger configuration.", + "Events": "The field that specifies which pull request events to filter on (opened, updated, closed) for the trigger configuration.", + "FilePaths": "The field that specifies to filter on file paths for the pull request trigger configuration." + }, "AWS::CodePipeline::Pipeline GitPushFilter": { + "Branches": "The field that specifies to filter on branches for the push trigger configuration.", + "FilePaths": "The field that specifies to filter on file paths for the push trigger configuration.", "Tags": "The field that contains the details for the Git tags trigger configuration." }, "AWS::CodePipeline::Pipeline GitTagFilterCriteria": { @@ -6529,7 +6570,7 @@ "AdminCreateUserConfig": "The configuration for creating a new user profile.", "AliasAttributes": "Attributes supported as an alias for this user pool. Possible values: *phone_number* , *email* , or *preferred_username* .\n\n> This user pool property cannot be updated.", "AutoVerifiedAttributes": "The attributes to be auto-verified. 
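A minimal sketch of how the new V2 trigger fields fit together; the source action name, branch, and path patterns are assumptions:

```yaml
# Fragment of an AWS::CodePipeline::Pipeline definition: queued executions,
# started only by pushes to main that touch files under src/.
PipelineType: V2
ExecutionMode: QUEUED                 # SUPERSEDED is the default
Triggers:
  - ProviderType: CodeStarSourceConnection
    GitConfiguration:
      SourceActionName: Source        # must match the pipeline's source action
      Push:
        - Branches:
            Includes: ["main"]
          FilePaths:
            Includes: ["src/**"]
```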
Possible values: *email* , *phone_number* .", - "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "DeletionProtection": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "DeviceConfiguration": "The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool.\n\n> When you provide a value for any `DeviceConfiguration` field, you activate the Amazon Cognito device-remembering feature.", "EmailConfiguration": "The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool.", "EmailVerificationMessage": "This parameter is no longer used. See [VerificationMessageTemplateType](https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_VerificationMessageTemplateType.html) .", @@ -6599,7 +6640,7 @@ "VerifyAuthChallengeResponse": "Verifies the authentication challenge response." }, "AWS::Cognito::UserPool NumberAttributeConstraints": { - "MaxValue": "The maximum value of an attribute that is of the number data type.", + "MaxValue": "The maximum length of a number attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "MinValue": "The minimum value of an attribute that is of the number data type." }, "AWS::Cognito::UserPool PasswordPolicy": { @@ -6636,7 +6677,7 @@ "SnsRegion": "The AWS Region to use with Amazon SNS integration. You can choose the same Region as your user pool, or a supported *Legacy Amazon SNS alternate Region* .\n\nAmazon Cognito resources in the Asia Pacific (Seoul) AWS Region must use your Amazon SNS configuration in the Asia Pacific (Tokyo) Region. For more information, see [SMS message settings for Amazon Cognito user pools](https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-sms-settings.html) ." }, "AWS::Cognito::UserPool StringAttributeConstraints": { - "MaxLength": "The maximum length.", + "MaxLength": "The maximum length of a string attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "MinLength": "The minimum length." }, "AWS::Cognito::UserPool UserAttributeUpdateSettings": { @@ -6710,7 +6751,7 @@ "AWS::Cognito::UserPoolIdentityProvider": { "AttributeMapping": "A mapping of IdP attributes to standard and custom user pool attributes.", "IdpIdentifiers": "A list of IdP identifiers.", - "ProviderDetails": "The scopes, URLs, and identifiers for your external identity provider. 
The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": 
\"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", + "ProviderDetails": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", "ProviderName": "The IdP name.", "ProviderType": "The IdP type.", "UserPoolId": "The user pool ID." @@ -6991,7 +7032,7 @@ "ParameterValue": "Another part of the key-value pair." }, "AWS::Config::ConformancePack TemplateSSMDocumentDetails": { - "DocumentName": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, AWS Config checks only your account and AWS Region for the SSM document. If you want to use an SSM document from another Region or account, you must provide the ARN.", + "DocumentName": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. 
If you use the document name, AWS Config checks only your account and AWS Region for the SSM document.", "DocumentVersion": "The version of the SSM document to use to create a conformance pack. By default, AWS Config uses the latest version.\n\n> This field is optional." }, "AWS::Config::DeliveryChannel": { @@ -7554,7 +7595,7 @@ "SecondaryEmail": "The user's secondary email address. If you provide a secondary email, the user receives email notifications -- other than password reset notifications -- to this email address instead of to their primary email address.\n\n*Pattern* : `(?=^.{0,265}$)[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,63}`" }, "AWS::Connect::User UserPhoneConfig": { - "AfterContactWorkTimeLimit": "The After Call Work (ACW) timeout setting, in seconds.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", + "AfterContactWorkTimeLimit": "The After Call Work (ACW) timeout setting, in seconds. This parameter has a minimum value of 0 and a maximum value of 2,000,000 seconds (24 days). Enter 0 if you don't want to allocate a specific amount of ACW time. It essentially means an indefinite amount of time. When the conversation ends, ACW starts; the agent must choose Close contact to end ACW.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", "AutoAccept": "The Auto accept setting.", "DeskPhoneNumber": "The phone number for the user's desk phone.", "PhoneType": "The phone type." @@ -7627,15 +7668,35 @@ "Key": "", "Value": "" }, + "AWS::ControlTower::EnabledBaseline": { + "BaselineIdentifier": "The specific `Baseline` enabled as part of the `EnabledBaseline` resource.", + "BaselineVersion": "The enabled version of the `Baseline` .", + "Parameters": "Parameters that are applied when enabling this `Baseline` . These parameters configure the behavior of the baseline.", + "Tags": "Tags associated with input to `EnableBaseline` .", + "TargetIdentifier": "The target on which to enable the `Baseline` ." + }, + "AWS::ControlTower::EnabledBaseline Parameter": { + "Key": "A string denoting the parameter key.", + "Value": "A low-level `Document` object of any type (for example, a Java Object)." + }, + "AWS::ControlTower::EnabledBaseline Tag": { + "Key": "A string that identifies a key-value pair.", + "Value": "A string parameter that describes an `EnabledBaseline` resource." + }, "AWS::ControlTower::EnabledControl": { - "ControlIdentifier": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *landing zone Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "ControlIdentifier": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", "Parameters": "Array of `EnabledControlParameter` objects.", + "Tags": "Tags to be applied to the enabled control.", "TargetIdentifier": "The ARN of the organizational unit. For information on how to find the `targetIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) ." 
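For context on the `Tags` property added to `AWS::ControlTower::EnabledControl` above (and the new `AWS::ControlTower::EnabledBaseline` resource), a template fragment that exercises it might look like the following minimal sketch; the logical name, the control and OU ARNs, and the tag values are illustrative placeholders, not values taken from the schema:

```yaml
# Minimal sketch of an enabled control carrying the new Tags property.
# All ARNs and tag values below are illustrative placeholders.
Resources:
  ExampleEnabledControl:
    Type: AWS::ControlTower::EnabledControl
    Properties:
      ControlIdentifier: arn:aws:controltower:us-east-1::control/AWS-GR_EXAMPLE
      TargetIdentifier: arn:aws:organizations::123456789012:ou/o-exampleorg/ou-examp-1234abcd
      Tags:
        - Key: environment
          Value: production
```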
}, "AWS::ControlTower::EnabledControl EnabledControlParameter": { "Key": "The key of a key/value pair. It is of type `string` .", "Value": "The value of a key/value pair. It can be of type `array` , `string` , `number` , `object` , or `boolean` . [Note: The *Type* field that follows may show a single type such as Number, which is only one possible type.]" }, + "AWS::ControlTower::EnabledControl Tag": { + "Key": "The key name of the tag. You can specify a value that is 1 to 128 Unicode characters in length and cannot be prefixed with `aws` .", + "Value": "The value for the tag. You can specify a value that is 0 to 256 Unicode characters in length and cannot be prefixed with `aws` ." + }, "AWS::ControlTower::LandingZone": { "Manifest": "The landing zone manifest JSON text file that specifies the landing zone configurations.", "Tags": "Tags to be applied to the landing zone.", @@ -9744,7 +9805,7 @@ }, "AWS::DynamoDB::GlobalTable Projection": { "NonKeyAttributes": "Represents the non-key attribute names which will be projected into the index.\n\nFor local secondary indexes, the total count of `NonKeyAttributes` summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.", - "ProjectionType": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index." + "ProjectionType": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default." }, "AWS::DynamoDB::GlobalTable ReadProvisionedThroughputSettings": { "ReadCapacityAutoScalingSettings": "Specifies auto scaling settings for the replica table or global secondary index.", @@ -9858,7 +9919,7 @@ }, "AWS::DynamoDB::Table Projection": { "NonKeyAttributes": "Represents the non-key attribute names which will be projected into the index.\n\nFor local secondary indexes, the total count of `NonKeyAttributes` summed across all of the local secondary indexes, must not exceed 100. If you project the same attribute into two different indexes, this counts as two distinct attributes when determining the total.", - "ProjectionType": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index." 
+ "ProjectionType": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default." }, "AWS::DynamoDB::Table ProvisionedThroughput": { "ReadCapacityUnits": "The maximum number of strongly consistent reads consumed per second before DynamoDB returns a `ThrottlingException` . For more information, see [Specifying Read and Write Requirements](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html) in the *Amazon DynamoDB Developer Guide* .\n\nIf read/write capacity mode is `PAY_PER_REQUEST` the value is set to 0.", @@ -9956,7 +10017,6 @@ "ClientCidrBlock": "The IPv4 address range, in CIDR notation, from which to assign client IP addresses. The address range cannot overlap with the local CIDR of the VPC in which the associated subnet is located, or the routes that you add manually. The address range cannot be changed after the Client VPN endpoint has been created. Client CIDR range must have a size of at least /22 and must not be greater than /12.", "ClientConnectOptions": "The options for managing connection authorization for new client connections.", "ClientLoginBannerOptions": "Options for enabling a customizable text banner that will be displayed on AWS provided clients when a VPN session is established.", - "ClientRouteMonitoringOptions": "", "ConnectionLogOptions": "Information about the client connection logging options.\n\nIf you enable client connection logging, data about client connections is sent to a Cloudwatch Logs log stream. The following information is logged:\n\n- Client connection requests\n- Client connection results (successful and unsuccessful)\n- Reasons for unsuccessful client connection requests\n- Client connection termination time", "Description": "A brief description of the Client VPN endpoint.", "DnsServers": "Information about the DNS servers to be used for DNS resolution. A Client VPN endpoint can have up to two DNS servers. If no DNS server is specified, the DNS address configured on the device is used for the DNS server.", @@ -9987,9 +10047,6 @@ "BannerText": "Customizable text that will be displayed in a banner on AWS provided clients when a VPN session is established. UTF-8 encoded characters only. Maximum of 1400 characters.", "Enabled": "Enable or disable a customizable text banner that will be displayed on AWS provided clients when a VPN session is established.\n\nValid values: `true | false`\n\nDefault value: `false`" }, - "AWS::EC2::ClientVpnEndpoint ClientRouteMonitoringOptions": { - "Enabled": "" - }, "AWS::EC2::ClientVpnEndpoint ConnectionLogOptions": { "CloudwatchLogGroup": "The name of the CloudWatch Logs log group. Required if connection logging is enabled.", "CloudwatchLogStream": "The name of the CloudWatch Logs log stream to which the connection data is published.", @@ -10088,7 +10145,7 @@ "Placement": "The location where the instance launched, if applicable.", "Priority": "The priority for the launch template override. 
The highest priority is launched first.\n\nIf the On-Demand `AllocationStrategy` is set to `prioritized` , EC2 Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacity-optimized-prioritized` , EC2 Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SubnetId": "The IDs of the subnets in which to launch the instances. Separate multiple subnet IDs using commas (for example, `subnet-1234abcdeexample1, subnet-0987cdef6example2` ). A request of type `instant` can have only one subnet ID.", - "WeightedCapacity": "The number of units provided by the specified instance type." + "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::EC2Fleet FleetLaunchTemplateSpecificationRequest": { "LaunchTemplateId": "The ID of the launch template.\n\nYou must specify the `LaunchTemplateId` or the `LaunchTemplateName` , but not both.", @@ -10110,13 +10167,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: `hdd` and `ssd`", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of baseline network bandwidth, in gigabits per second (Gbps). For more information, see [Amazon EC2 instance network bandwidth](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -10588,14 +10646,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. 
Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: `hdd` and `ssd`", - "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` .", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. 
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of network bandwidth, in gigabits per second (Gbps).\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. 
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -11090,9 +11148,17 @@ "AWS::EC2::NetworkInterfaceAttachment": { "DeleteOnTermination": "Whether to delete the network interface when the instance terminates. By default, this value is set to `true` .", "DeviceIndex": "The network interface's position in the attachment order. 
For example, the first attached network interface has a `DeviceIndex` of 0.", + "EnaSrdSpecification": "Configures ENA Express for the network interface that this action attaches to the instance.", "InstanceId": "The ID of the instance to which you will attach the ENI.", "NetworkInterfaceId": "The ID of the ENI that you want to attach." }, + "AWS::EC2::NetworkInterfaceAttachment EnaSrdSpecification": { + "EnaSrdEnabled": "Indicates whether ENA Express is enabled for the network interface.", + "EnaSrdUdpSpecification": "Configures ENA Express for UDP network traffic." + }, + "AWS::EC2::NetworkInterfaceAttachment EnaSrdUdpSpecification": { + "EnaSrdUdpEnabled": "Indicates whether UDP traffic to and from the instance uses ENA Express. To specify this setting, you must first enable ENA Express." + }, "AWS::EC2::NetworkInterfacePermission": { "AwsAccountId": "The AWS account ID.", "NetworkInterfaceId": "The ID of the network interface.", @@ -11293,13 +11359,14 @@ "InstanceGenerations": "Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Current generation instance types are typically the latest two to three generations in each instance family. For more information, see [Instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) in the *Amazon EC2 User Guide* .\n\nFor current generation instance types, specify `current` .\n\nFor previous generation instance types, specify `previous` .\n\nDefault: Current and previous generation instance types", "LocalStorage": "Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, [Amazon EC2 instance store](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html) in the *Amazon EC2 User Guide* .\n\n- To include instance types with instance store volumes, specify `included` .\n- To require only instance types with instance store volumes, specify `required` .\n- To exclude instance types with instance store volumes, specify `excluded` .\n\nDefault: `included`", "LocalStorageTypes": "The type of local storage that is required.\n\n- For instance types with hard disk drive (HDD) storage, specify `hdd` .\n- For instance types with solid state drive (SSD) storage, specify `ssd` .\n\nDefault: `hdd` and `ssd`", + "MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. 
If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "MemoryGiBPerVCpu": "The minimum and maximum amount of memory per vCPU, in GiB.\n\nDefault: No minimum or maximum limits", "MemoryMiB": "The minimum and maximum amount of memory, in MiB.", "NetworkBandwidthGbps": "The minimum and maximum amount of baseline network bandwidth, in gigabits per second (Gbps). For more information, see [Amazon EC2 instance network bandwidth](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-network-bandwidth.html) in the *Amazon EC2 User Guide* .\n\nDefault: No minimum or maximum limits", "NetworkInterfaceCount": "The minimum and maximum number of network interfaces.\n\nDefault: No minimum or maximum limits", "OnDemandMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> If you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. \n\nDefault: `20`", "RequireHibernateSupport": "Indicates whether instance types must support hibernation for On-Demand Instances.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) .\n\nDefault: `false`", - "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. 
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "SpotMaxPricePercentageOverLowestPrice": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "TotalLocalStorageGB": "The minimum and maximum amount of total local storage, in GB.\n\nDefault: No minimum or maximum limits", "VCpuCount": "The minimum and maximum number of vCPUs." }, @@ -11314,7 +11381,7 @@ "Priority": "The priority for the launch template override. 
The highest priority is launched first.\n\nIf `OnDemandAllocationStrategy` is set to `prioritized` , Spot Fleet uses priority to determine which launch template override to use first in fulfilling On-Demand capacity.\n\nIf the Spot `AllocationStrategy` is set to `capacityOptimizedPrioritized` , Spot Fleet uses priority on a best-effort basis to determine which launch template override to use in fulfilling Spot capacity, but optimizes for capacity first.\n\nValid values are whole numbers starting at `0` . The lower the number, the higher the priority. If no number is set, the launch template override has the lowest priority. You can set the same priority for different launch template overrides.", "SpotPrice": "The maximum price per unit hour that you are willing to pay for a Spot Instance. We do not recommend using this parameter because it can lead to increased interruptions. If you do not specify this parameter, you will pay the current Spot price.\n\n> If you specify a maximum price, your instances will be interrupted more frequently than if you do not specify this parameter.", "SubnetId": "The ID of the subnet in which to launch the instances.", - "WeightedCapacity": "The number of units provided by the specified instance type." + "WeightedCapacity": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour." }, "AWS::EC2::SpotFleet LoadBalancersConfig": { "ClassicLoadBalancersConfig": "The Classic Load Balancers.", @@ -12398,6 +12465,7 @@ "Scale": "A floating-point percentage of your desired number of tasks to place and keep running in the task set.", "Service": "The short name or full Amazon Resource Name (ARN) of the service to create the task set in.", "ServiceRegistries": "The details of the service discovery registries to assign to this task set. For more information, see [Service discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) .", + "Tags": "The metadata that you apply to the task set to help you categorize and organize them. Each tag consists of a key and an optional value. You define both.\n\nThe following basic restrictions apply to tags:\n\n- Maximum number of tags per resource - 50\n- For each resource, each tag key must be unique, and each tag key can have only one value.\n- Maximum key length - 128 Unicode characters in UTF-8\n- Maximum value length - 256 Unicode characters in UTF-8\n- If your tagging schema is used across multiple services and resources, remember that other services may have restrictions on allowed characters. Generally allowed characters are: letters, numbers, and spaces representable in UTF-8, and the following characters: + - = . _ : / @.\n- Tag keys and values are case-sensitive.\n- Do not use `aws:` , `AWS:` , or any upper or lowercase combination of such as a prefix for either keys or values as it is reserved for AWS use. You cannot edit or delete tag keys or values with this prefix. Tags with this prefix do not count against your tags per resource limit.", "TaskDefinition": "The task definition for the tasks in the task set to use. If a revision isn't specified, the latest `ACTIVE` revision is used." 
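Similarly, the `Tags` property added to `AWS::ECS::TaskSet` above can be exercised with a fragment along these lines; the cluster, service, and task definition references and the tag values are illustrative placeholders:

```yaml
# Minimal sketch of a task set carrying the new Tags property.
# Cluster, service, and task definition references are placeholders.
Resources:
  ExampleTaskSet:
    Type: AWS::ECS::TaskSet
    Properties:
      Cluster: example-cluster
      Service: example-service
      TaskDefinition: example-task-def:1
      Scale:
        Unit: PERCENT
        Value: 50
      Tags:
        - Key: team
          Value: payments
```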
}, "AWS::ECS::TaskSet AwsVpcConfiguration": { @@ -12423,6 +12491,10 @@ "Port": "The port value used if your service discovery service specified an SRV record. This field might be used if both the `awsvpc` network mode and SRV records are used.", "RegistryArn": "The Amazon Resource Name (ARN) of the service registry. The currently supported service registry is AWS Cloud Map . For more information, see [CreateService](https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html) ." }, + "AWS::ECS::TaskSet Tag": { + "Key": "One part of a key-value pair that make up a tag. A `key` is a general label that acts like a category for more specific tag values.", + "Value": "The optional part of a key-value pair that make up a tag. A `value` acts as a descriptor within a tag category (key)." + }, "AWS::EFS::AccessPoint": { "AccessPointTags": "An array of key-value pairs to apply to this resource.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "ClientToken": "The opaque string specified in the request to ensure idempotent creation.", @@ -12814,7 +12886,8 @@ "ServiceAccessSecurityGroup": "The identifier of the Amazon EC2 security group for the Amazon EMR service to access clusters in VPC private subnets.", "TaskInstanceFleets": "Describes the EC2 instances and instance configurations for the task instance fleets when using clusters with the instance fleet configuration. These task instance fleets are added to the cluster as part of the cluster launch. Each task instance fleet must have a unique name specified so that CloudFormation can differentiate between the task instance fleets.\n\n> You can currently specify only one task instance fleet for a cluster. After creating the cluster, you can only modify the mutable properties of `InstanceFleetConfig` , which are `TargetOnDemandCapacity` and `TargetSpotCapacity` . Modifying any other property results in cluster replacement. > To allow a maximum of 30 Amazon EC2 instance types per fleet, include `TaskInstanceFleets` when you create your cluster. If you create your cluster without `TaskInstanceFleets` , Amazon EMR uses its default allocation strategy, which allows for a maximum of five Amazon EC2 instance types.", "TaskInstanceGroups": "Describes the EC2 instances and instance configurations for task instance groups when using clusters with the uniform instance group configuration. These task instance groups are added to the cluster as part of the cluster launch. Each task instance group must have a unique name specified so that CloudFormation can differentiate between the task instance groups.\n\n> After creating the cluster, you can only modify the mutable properties of `InstanceGroupConfig` , which are `AutoScalingPolicy` and `InstanceCount` . Modifying any other property results in cluster replacement.", - "TerminationProtected": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error." 
+ "TerminationProtected": "Specifies whether to lock the cluster to prevent the Amazon EC2 instances from being terminated by API call, user intervention, or in the event of a job-flow error.", + "UnhealthyNodeReplacement": "" }, "AWS::EMR::Cluster KerberosAttributes": { "ADDomainJoinPassword": "The Active Directory password for `ADDomainJoinUser` .", @@ -13124,7 +13197,7 @@ "AWS::EMRServerless::Application ConfigurationObject": { "Classification": "", "Configurations": "", - "SensitivePropertiesMap": "" + "Properties": "" }, "AWS::EMRServerless::Application ImageConfigurationInput": { "ImageUri": "The URI of an image in the Amazon ECR registry. This field is required when you create a new application. If you leave this field blank in an update, Amazon EMR will remove the image configuration." @@ -13193,7 +13266,7 @@ "Port": "The port number on which each of the cache nodes accepts connections.", "PreferredAvailabilityZone": "The EC2 Availability Zone in which the cluster is created.\n\nAll nodes belonging to this cluster are placed in the preferred Availability Zone. If you want to create your nodes across multiple Availability Zones, use `PreferredAvailabilityZones` .\n\nDefault: System chosen Availability Zone.", "PreferredAvailabilityZones": "A list of the Availability Zones in which cache nodes are created. The order of the zones in the list is not important.\n\nThis option is only supported on Memcached.\n\n> If you are creating your cluster in an Amazon VPC (recommended) you can only locate nodes in Availability Zones that are associated with the subnets in the selected subnet group.\n> \n> The number of Availability Zones listed must equal the value of `NumCacheNodes` . \n\nIf you want all the nodes in the same Availability Zone, use `PreferredAvailabilityZone` instead, or repeat the Availability Zone multiple times in the list.\n\nDefault: System chosen Availability Zones.", - "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for `ddd` are:\n\nSpecifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", + "PreferredMaintenanceWindow": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", "SnapshotArns": "A single-element string list containing an Amazon Resource Name (ARN) that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot contain any commas.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nExample of an Amazon S3 ARN: `arn:aws:s3:::my_bucket/snapshot1.rdb`", "SnapshotName": "The name of a Redis snapshot from which to restore data into the new node group (shard). 
The snapshot status changes to `restoring` while the new node group (shard) is being created.\n\n> This parameter is only valid if the `Engine` parameter is `redis` .", "SnapshotRetentionLimit": "The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set `SnapshotRetentionLimit` to 5, a snapshot taken today is retained for 5 days before being deleted.\n\n> This parameter is only valid if the `Engine` parameter is `redis` . \n\nDefault: 0 (i.e., automatic backups are disabled for this cache cluster).", @@ -13341,12 +13414,12 @@ "CacheUsageLimits": "The cache usage limit for the serverless cache.", "DailySnapshotTime": "The daily time that a cache snapshot will be created. Default is NULL, i.e. snapshots will not be created at a specific time on a daily basis. Available for Redis only.", "Description": "A description of the serverless cache.", - "Endpoint": "Represents the information required for client programs to connect to a cache node.", + "Endpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "Engine": "The engine the serverless cache is compatible with.", "FinalSnapshotName": "The name of the final snapshot taken of a cache before the cache is deleted.", "KmsKeyId": "The ID of the AWS Key Management Service (KMS) key that is used to encrypt data at rest in the serverless cache.", "MajorEngineVersion": "The version number of the engine the serverless cache is compatible with.", - "ReaderEndpoint": "Represents the information required for client programs to connect to a cache node.", + "ReaderEndpoint": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "SecurityGroupIds": "The IDs of the EC2 security groups associated with the serverless cache.", "ServerlessCacheName": "The unique identifier of the serverless cache.", "SnapshotArnsToRestore": "The ARN of the snapshot from which to restore data into the new cache.", @@ -13386,7 +13459,7 @@ }, "AWS::ElastiCache::User": { "AccessString": "Access permissions string used for this user.", - "AuthenticationMode": "Specifies the authentication mode to use. Below is an example of the possible JSON values:\n\n```\n{ Type: Passwords: [\"*****\", \"******\"] // If Type is password.\n}\n```", + "AuthenticationMode": "Specifies the authentication mode to use. Below is an example of the possible JSON values:\n\n```\n{ Passwords: [\"*****\", \"******\"] // If Type is password.\n}\n```", "Engine": "The current supported value is redis.", "NoPasswordRequired": "Indicates a password is not required for this user.", "Passwords": "Passwords used for this user. You can create up to two passwords for each user.", @@ -13495,7 +13568,7 @@ "AWS::ElasticLoadBalancing::LoadBalancer": { "AccessLoggingPolicy": "Information about where and how access logs are stored for the load balancer.", "AppCookieStickinessPolicy": "Information about a policy for application-controlled session stickiness.", - "AvailabilityZones": "The Availability Zones for the load balancer. For load balancers in a VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", + "AvailabilityZones": "The Availability Zones for a load balancer in a default VPC. 
For a load balancer in a nondefault VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", "ConnectionDrainingPolicy": "If enabled, the load balancer allows existing requests to complete before the load balancer shifts traffic away from a deregistered or unhealthy instance.\n\nFor more information, see [Configure connection draining](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) in the *User Guide for Classic Load Balancers* .", "ConnectionSettings": "If enabled, the load balancer allows the connections to remain idle (no data is sent over the connection) for the specified duration.\n\nBy default, Elastic Load Balancing maintains a 60-second idle connection timeout for both front-end and back-end connections of your load balancer. For more information, see [Configure idle connection timeout](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) in the *User Guide for Classic Load Balancers* .", "CrossZone": "If enabled, the load balancer routes the request traffic evenly across all instances regardless of the Availability Zones.\n\nFor more information, see [Configure cross-zone load balancing](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) in the *User Guide for Classic Load Balancers* .", @@ -13565,7 +13638,7 @@ "MutualAuthentication": "The mutual authentication configuration information.", "Port": "The port on which the load balancer is listening. You cannot specify a port for a Gateway Load Balancer.", "Protocol": "The protocol for connections from clients to the load balancer. For Application Load Balancers, the supported protocols are HTTP and HTTPS. For Network Load Balancers, the supported protocols are TCP, TLS, UDP, and TCP_UDP. You can\u2019t specify the UDP or TCP_UDP protocol if dual-stack mode is enabled. You cannot specify a protocol for a Gateway Load Balancer.", - "SslPolicy": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* ." + "SslPolicy": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nUpdating the security policy can result in interruptions if the load balancer is handling a high volume of traffic.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* ." }, "AWS::ElasticLoadBalancingV2::Listener Action": { "AuthenticateCognitoConfig": "[HTTPS listeners] Information for using Amazon Cognito to authenticate users. 
Specify only when `Type` is `authenticate-cognito` .", @@ -14455,18 +14528,18 @@ "AWS::FIS::ExperimentTemplate ExperimentTemplateAction": { "ActionId": "The ID of the action.", "Description": "A description for the action.", - "ExperimentTemplateActionItemParameter": "The parameters for the action.", - "ExperimentTemplateActionItemTarget": "The targets for the action.", - "StartAfter": "The name of the action that must be completed before the current action starts." + "Parameters": "The parameters for the action.", + "StartAfter": "The name of the action that must be completed before the current action starts.", + "Targets": "The targets for the action." }, "AWS::FIS::ExperimentTemplate ExperimentTemplateExperimentOptions": { "AccountTargeting": "The account targeting setting for an experiment template.", "EmptyTargetResolutionMode": "The empty target resolution mode for an experiment template." }, "AWS::FIS::ExperimentTemplate ExperimentTemplateLogConfiguration": { - "CloudWatchLogsConfiguration": "The configuration for experiment logging to Amazon CloudWatch Logs.", + "CloudWatchLogsConfiguration": "The configuration for experiment logging to CloudWatch Logs .", "LogSchemaVersion": "The schema version.", - "S3Configuration": "The configuration for experiment logging to Amazon S3." + "S3Configuration": "The configuration for experiment logging to Amazon S3 ." }, "AWS::FIS::ExperimentTemplate ExperimentTemplateStopCondition": { "Source": "The source for the stop condition.", @@ -14474,7 +14547,7 @@ }, "AWS::FIS::ExperimentTemplate ExperimentTemplateTarget": { "Filters": "The filters to apply to identify target resources using specific attributes.", - "Parameters": "The resource type parameters.", + "Parameters": "The parameters for the resource type.", "ResourceArns": "The Amazon Resource Names (ARNs) of the targets.", "ResourceTags": "The tags for the target resources.", "ResourceType": "The resource type.", @@ -14749,9 +14822,9 @@ "CopyTagsToBackups": "A boolean flag indicating whether tags for the volume should be copied to backups. This value defaults to false. If it's set to true, all tags for the volume are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the volume, regardless of this value.", "JunctionPath": "Specifies the location in the SVM's namespace where the volume is mounted. This parameter is required. The `JunctionPath` must have a leading forward slash, such as `/vol3` .", "OntapVolumeType": "Specifies the type of volume you are creating. Valid values are the following:\n\n- `RW` specifies a read/write volume. `RW` is the default.\n- `DP` specifies a data-protection volume. A `DP` volume is read-only and can be used as the destination of a NetApp SnapMirror relationship.\n\nFor more information, see [Volume types](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-types) in the Amazon FSx for NetApp ONTAP User Guide.", - "SecurityStyle": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. 
For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.",
+    "SecurityStyle": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see the topic [What the security styles and their effects are](https://docs.netapp.com/us-en/ontap/nfs-admin/security-styles-their-effects-concept.html) in the NetApp Documentation Center.\n\nFor more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the FSx for ONTAP User Guide.",
    "SizeInBytes": "Specifies the configured size of the volume, in bytes.",
-    "SizeInMegabytes": "*This property has been deprecated. Use `SizeInBytes` .*\n\nSpecifies the size of the volume, in megabytes (MB), that you are creating.",
+    "SizeInMegabytes": "Use `SizeInBytes` instead. Specifies the size of the volume, in megabytes (MB), that you are creating.",
    "SnaplockConfiguration": "The SnapLock configuration object for an FSx for ONTAP SnapLock volume.",
    "SnapshotPolicy": "Specifies the snapshot policy for the volume. There are three built-in snapshot policies:\n\n- `default` : This is the default policy. A maximum of six hourly snapshots taken five minutes past the hour. A maximum of two daily snapshots taken Monday through Saturday at 10 minutes after midnight. A maximum of two weekly snapshots taken every Sunday at 15 minutes after midnight.\n- `default-1weekly` : This policy is the same as the `default` policy except that it only retains one snapshot from the weekly schedule.\n- `none` : This policy does not take any snapshots. This policy can be assigned to volumes to prevent automatic snapshots from being taken.\n\nYou can also provide the name of a custom policy that you created with the ONTAP CLI or REST API.\n\nFor more information, see [Snapshot policies](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/snapshots-ontap.html#snapshot-policies) in the Amazon FSx for NetApp ONTAP User Guide.",
    "StorageEfficiencyEnabled": "Set to true to enable deduplication, compression, and compaction storage efficiency features on the volume, or set to false to disable them.\n\n`StorageEfficiencyEnabled` is required when creating a `RW` volume ( `OntapVolumeType` set to `RW` ).",
@@ -15321,9 +15394,9 @@
    },
    "AWS::Glue::Classifier CsvClassifier": {
    "AllowSingleColumn": "Enables the processing of files that contain only one column.",
-    "ContainsCustomDatatype": "",
+    "ContainsCustomDatatype": "Indicates whether the CSV file contains custom data types.",
    "ContainsHeader": "Indicates whether the CSV file contains a header.\n\nA value of `UNKNOWN` specifies that the classifier will detect whether the CSV file contains headings.\n\nA value of `PRESENT` specifies that the CSV file contains headings.\n\nA value of `ABSENT` specifies that the CSV file does not contain headings.",
-    "CustomDatatypeConfigured": "Enables the custom datatype to be configured.",
+    "CustomDatatypeConfigured": "Enables the configuration of custom data types.",
    "Delimiter": "A custom symbol to denote what separates each column entry in the row.",
    "DisableValueTrimming": "Specifies not to trim values before identifying the type of column values. The default value is `true` .",
    "Header": "A list of strings representing column names.",
@@ -15395,9 +15468,9 @@
    },
    "AWS::Glue::Crawler IcebergTarget": {
    "ConnectionName": "The name of the connection to use to connect to the Iceberg target.",
-    "Exclusions": "A list of glob patterns used to exclude from the crawl. For more information, see [Catalog Tables with a Crawler](https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html) .",
+    "Exclusions": "A list of glob patterns used to exclude from the crawl.",
    "MaximumTraversalDepth": "The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Iceberg metadata folder in your Amazon S3 path. Used to limit the crawler run time.",
-    "Paths": "One or more Amazon S3 paths that contains Iceberg metadata folders as `s3://bucket/prefix` ."
+    "Paths": "One or more Amazon S3 paths that contain Iceberg metadata folders as s3://bucket/prefix ."
    },
    "AWS::Glue::Crawler JdbcTarget": {
    "ConnectionName": "The name of the connection to use to connect to the JDBC target.",
@@ -15430,7 +15503,7 @@
    "CatalogTargets": "Specifies AWS Glue Data Catalog targets.",
    "DeltaTargets": "Specifies an array of Delta data store targets.",
    "DynamoDBTargets": "Specifies Amazon DynamoDB targets.",
-    "IcebergTargets": "",
+    "IcebergTargets": "Specifies Apache Iceberg data store targets.",
    "JdbcTargets": "Specifies JDBC targets.",
    "MongoDBTargets": "A list of Mongo DB targets.",
    "S3Targets": "Specifies Amazon Simple Storage Service (Amazon S3) targets."
@@ -15439,7 +15512,7 @@
    "ContextWords": "A list of context words. If none of these context words are found within the vicinity of the regular expression the data will not be detected as sensitive data.\n\nIf no context words are passed only a regular expression is checked.",
    "Name": "A name for the custom pattern that allows it to be retrieved or deleted later. This name must be unique per AWS account.",
    "RegexString": "A regular expression string that is used for detecting sensitive data in a custom pattern.",
-    "Tags": ""
+    "Tags": "AWS tags that contain a key-value pair and may be searched by console, command line, or API."
    },
    "AWS::Glue::DataCatalogEncryptionSettings": {
    "CatalogId": "The ID of the Data Catalog in which the settings are created.",
@@ -15455,6 +15528,7 @@
    },
    "AWS::Glue::DataCatalogEncryptionSettings EncryptionAtRest": {
    "CatalogEncryptionMode": "The encryption-at-rest mode for encrypting Data Catalog data.",
+    "CatalogEncryptionServiceRole": "The role that AWS Glue assumes to encrypt and decrypt the Data Catalog objects on the caller's behalf.",
    "SseAwsKmsKeyId": "The ID of the AWS KMS key to use for encryption at rest."
    },
    "AWS::Glue::DataQualityRuleset": {
@@ -15479,7 +15553,7 @@
    "AWS::Glue::Database DatabaseIdentifier": {
    "CatalogId": "The ID of the Data Catalog in which the database resides.",
    "DatabaseName": "The name of the catalog database.",
-    "Region": "Region of the target database."
+    "Region": "The Region of the database."
    },
    "AWS::Glue::Database DatabaseInput": {
    "CreateTableDefaultPermissions": "Creates a set of default permissions on the table for principals. Used by AWS Lake Formation . Not used in the normal course of AWS Glue operations.",
@@ -15718,7 +15792,7 @@
    "AWS::Glue::Table": {
    "CatalogId": "The ID of the Data Catalog in which to create the `Table` .",
    "DatabaseName": "The name of the database where the table metadata resides. For Hive compatibility, this must be all lowercase.",
-    "OpenTableFormatInput": "A structure representing an open format table.",
+    "OpenTableFormatInput": "Specifies an `OpenTableFormatInput` structure when creating an open format table.",
    "TableInput": "A structure used to define a table."
    },
    "AWS::Glue::Table Column": {
@@ -15727,7 +15801,7 @@
    "Type": "The data type of the `Column` ."
    },
    "AWS::Glue::Table IcebergInput": {
-    "MetadataOperation": "A required metadata operation. Can only be set to `CREATE` .",
+    "MetadataOperation": "A required metadata operation. Can only be set to CREATE.",
    "Version": "The table version for the Iceberg table. Defaults to 2."
    },
    "AWS::Glue::Table OpenTableFormatInput": {
@@ -15776,7 +15850,7 @@
    "CatalogId": "The ID of the Data Catalog in which the table resides.",
    "DatabaseName": "The name of the catalog database that contains the target table.",
    "Name": "The name of the target table.",
-    "Region": "Region of the target table."
+    "Region": "The Region of the table."
    },
    "AWS::Glue::Table TableInput": {
    "Description": "A description of the table.",
@@ -15792,15 +15866,15 @@
    "ViewOriginalText": "Included for Apache Hive compatibility. Not used in the normal course of AWS Glue operations. If the table is a `VIRTUAL_VIEW` , certain Athena configuration encoded in base64."
    },
    "AWS::Glue::TableOptimizer": {
-    "CatalogId": "",
+    "CatalogId": "The catalog ID of the table.",
    "DatabaseName": "The name of the database. For Hive compatibility, this is folded to lowercase when it is stored.",
    "TableName": "The table name. For Hive compatibility, this must be entirely lowercase.",
    "TableOptimizerConfiguration": "",
-    "Type": ""
+    "Type": "The type of table optimizer. Currently, the only valid value is compaction."
    },
    "AWS::Glue::TableOptimizer TableOptimizerConfiguration": {
-    "Enabled": "",
-    "RoleArn": ""
+    "Enabled": "Whether the table optimization is enabled.",
+    "RoleArn": "A role passed by the caller which gives the service permission to update the resources associated with the optimizer on the caller's behalf."
    },
    "AWS::Glue::Trigger": {
    "Actions": "The actions initiated by this trigger.",
@@ -16479,8 +16553,8 @@
    "Value": ""
    },
    "AWS::GroundStation::MissionProfile": {
-    "ContactPostPassDurationSeconds": "Amount of time in seconds after a contact ends that you\u2019d like to receive a CloudWatch Event indicating the pass has finished. For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)",
-    "ContactPrePassDurationSeconds": "Amount of time in seconds prior to contact start that you'd like to receive a CloudWatch Event indicating an upcoming pass. For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)",
+    "ContactPostPassDurationSeconds": "Amount of time in seconds after a contact ends that you\u2019d like to receive a Ground Station Contact State Change Event indicating the pass has finished.",
+    "ContactPrePassDurationSeconds": "Amount of time in seconds prior to contact start that you'd like to receive a Ground Station Contact State Change Event indicating an upcoming pass.",
    "DataflowEdges": "A list containing lists of config ARNs. Each list of config ARNs is an edge, with a \"from\" config and a \"to\" config.",
    "MinimumViableContactDurationSeconds": "Minimum length of a contact in seconds that Ground Station will return when listing contacts. Ground Station will not return contacts shorter than this duration.",
    "Name": "The name of the mission profile.",
@@ -16585,12 +16659,14 @@
    },
    "AWS::GuardDuty::Master": {
    "DetectorId": "The unique ID of the detector of the GuardDuty member account.",
-    "InvitationId": "The ID of the invitation that is sent to the account designated as a member account. You can find the invitation ID by using the ListInvitation action of the GuardDuty API."
+    "InvitationId": "The ID of the invitation that is sent to the account designated as a member account. You can find the invitation ID by using the ListInvitation action of the GuardDuty API.",
+    "MasterId": "The AWS account ID of the account designated as the GuardDuty administrator account."
    },
    "AWS::GuardDuty::Member": {
    "DetectorId": "The ID of the detector associated with the GuardDuty service to add the member to.",
    "DisableEmailNotification": "Specifies whether or not to disable email notification for the member account that you invite.",
    "Email": "The email address associated with the member account.",
+    "MemberId": "The AWS account ID of the account to designate as a member.",
    "Message": "The invitation message that you want to send to the accounts that you're inviting to GuardDuty as members.",
    "Status": "You can use the `Status` property to update the status of the relationship between the member account and its administrator account. Valid values are `Created` and `Invited` when using an `AWS::GuardDuty::Member` resource. If the value for this property is not provided or set to `Created` , a member account is created but not invited. If the value of this property is set to `Invited` , a member account is created and invited."
    },
@@ -17023,16 +17099,16 @@
    "Service": "Specifies the service in which this image was registered."
    },
    "AWS::ImageBuilder::Image": {
-    "ContainerRecipeArn": "The Amazon Resource Name (ARN) of the container recipe that is used for this pipeline.",
-    "DistributionConfigurationArn": "The Amazon Resource Name (ARN) of the distribution configuration.",
-    "EnhancedImageMetadataEnabled": "Indicates whether Image Builder collects additional information about the image, such as the operating system (OS) version and package list.",
+    "ContainerRecipeArn": "The Amazon Resource Name (ARN) of the container recipe that defines how images are configured and tested.",
+    "DistributionConfigurationArn": "The Amazon Resource Name (ARN) of the distribution configuration that defines and configures the outputs of your pipeline.",
+    "EnhancedImageMetadataEnabled": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder. Enabled by default.",
    "ExecutionRole": "The name or Amazon Resource Name (ARN) for the IAM role you create that grants Image Builder access to perform workflow actions.",
-    "ImageRecipeArn": "The Amazon Resource Name (ARN) of the image recipe.",
+    "ImageRecipeArn": "The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed.",
    "ImageScanningConfiguration": "Contains settings for vulnerability scans.",
-    "ImageTestsConfiguration": "The configuration settings for your image test components, which includes a toggle that allows you to turn off tests, and a timeout setting.",
-    "InfrastructureConfigurationArn": "The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.",
-    "Tags": "The tags that apply to this image.",
-    "Workflows": "Contains the build and test workflows that are associated with the image."
+    "ImageTestsConfiguration": "The image tests configuration of the image.",
+    "InfrastructureConfigurationArn": "The Amazon Resource Name (ARN) of the infrastructure configuration that defines the environment in which your image will be built and tested.",
+    "Tags": "The tags of the image.",
+    "Workflows": "Contains an array of workflow configuration objects."
    },
    "AWS::ImageBuilder::Image EcrConfiguration": {
    "ContainerTags": "Tags for Image Builder to apply to the output container image that Amazon Inspector scans. Tags can help you identify and manage your scanned images.",
@@ -17860,7 +17936,7 @@
    "ThingTypeDescription": "The description of the thing type."
    },
    "AWS::IoT::TopicRule": {
-    "RuleName": "The name of the rule.",
+    "RuleName": "The name of the rule.\n\n*Pattern* : `[a-zA-Z0-9:_-]+`",
    "Tags": "Metadata which can be used to manage the topic rule.\n\n> For URI Request parameters use format: ...key1=value1&key2=value2...\n> \n> For the CLI command-line parameter use format: --tags \"key1=value1&key2=value2...\"\n> \n> For the cli-input-json file use format: \"tags\": \"key1=value1&key2=value2...\"",
    "TopicRulePayload": "The rule payload."
}, @@ -18462,7 +18538,7 @@ }, "AWS::IoTEvents::AlarmModel Firehose": { "DeliveryStreamName": "The name of the Kinesis Data Firehose delivery stream where the data is written.", - "Payload": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "Payload": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "Separator": "A character separator that is used to separate records written to the Kinesis Data Firehose delivery stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma)." }, "AWS::IoTEvents::AlarmModel InitializationConfiguration": { @@ -18578,7 +18654,7 @@ }, "AWS::IoTEvents::DetectorModel Firehose": { "DeliveryStreamName": "The name of the Kinesis Data Firehose delivery stream where the data is written.", - "Payload": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "Payload": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "Separator": "A character separator that is used to separate records written to the Kinesis Data Firehose delivery stream. Valid values are: '\\n' (newline), '\\t' (tab), '\\r\\n' (Windows newline), ',' (comma)." }, "AWS::IoTEvents::DetectorModel IotEvents": { @@ -19447,6 +19523,7 @@ "LastUplinkReceivedAt": "The date and time when the most recent uplink was received.", "LoRaWAN": "The device configuration information to use to create the wireless device. Must be at least one of OtaaV10x, OtaaV11, AbpV11, or AbpV10x.", "Name": "The name of the new resource.", + "Positioning": "", "Tags": "The tags are an array of key-value pairs to attach to the specified resource. Tags can have a minimum of 0 and a maximum of 50 items.", "ThingArn": "The ARN of the thing to associate with the wireless device.", "Type": "The wireless device type." @@ -19459,11 +19536,20 @@ "DevAddr": "The DevAddr value.", "SessionKeys": "Session keys for ABP v1.1." }, + "AWS::IoTWireless::WirelessDevice Application": { + "DestinationName": "", + "FPort": "", + "Type": "" + }, + "AWS::IoTWireless::WirelessDevice FPorts": { + "Applications": "" + }, "AWS::IoTWireless::WirelessDevice LoRaWANDevice": { "AbpV10x": "ABP device object for LoRaWAN specification v1.0.x.", "AbpV11": "ABP device object for create APIs for v1.1.", "DevEui": "The DevEUI value.", "DeviceProfileId": "The ID of the device profile for the new wireless device.", + "FPorts": "", "OtaaV10x": "OTAA device object for create APIs for v1.0.x", "OtaaV11": "OTAA device object for v1.1 for create APIs.", "ServiceProfileId": "The ID of the service profile." 
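The `FPorts` and `Applications` structures added above ship with empty descriptions, so their nesting is easy to misread. As a rough sketch of how the pieces fit together in a template, the fragment below wires an application onto a LoRaWAN device; every concrete value (the names, profile IDs, DevEui, the FPort number, and the `SemtechGeolocation` application type), as well as the omission of the OTAA/ABP session block, is an illustrative assumption rather than something documented in this schema:

```json
{
  "PositioningDevice": {
    "Type": "AWS::IoTWireless::WirelessDevice",
    "Properties": {
      "Name": "my-lorawan-device",
      "Type": "LoRaWAN",
      "DestinationName": "MyUplinkDestination",
      "LoRaWAN": {
        "DeviceProfileId": "12345678-1234-1234-1234-123456789012",
        "ServiceProfileId": "87654321-4321-4321-4321-210987654321",
        "DevEui": "ACDE480000000001",
        "FPorts": {
          "Applications": [
            {
              "FPort": 199,
              "Type": "SemtechGeolocation",
              "DestinationName": "MyPositionDestination"
            }
          ]
        }
      }
    }
  }
}
```

Each entry in `Applications` pairs an `FPort` with a `Type` and a `DestinationName`, matching the three fields listed under `AWS::IoTWireless::WirelessDevice Application` above.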
@@ -19732,7 +19818,7 @@
    "DatabaseConfiguration": "Provides the configuration information to connect to a database as your data source.",
    "GoogleDriveConfiguration": "Provides the configuration information to connect to Google Drive as your data source.",
    "OneDriveConfiguration": "Provides the configuration information to connect to Microsoft OneDrive as your data source.",
-    "S3Configuration": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.",
+    "S3Configuration": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.\n\n> Amazon Kendra now supports an upgraded Amazon S3 connector.\n> \n> You must now use the [TemplateConfiguration](https://docs.aws.amazon.com/kendra/latest/APIReference/API_TemplateConfiguration.html) object instead of the `S3DataSourceConfiguration` object to configure your connector.\n> \n> Connectors configured using the older console and API architecture will continue to function as configured. However, you won't be able to edit or update them. If you want to edit or update your connector configuration, you must create a new connector.\n> \n> We recommend migrating your connector workflow to the upgraded version. Support for connectors configured using the older architecture is scheduled to end by June 2024.",
    "SalesforceConfiguration": "Provides the configuration information to connect to Salesforce as your data source.",
    "ServiceNowConfiguration": "Provides the configuration information to connect to ServiceNow as your data source.",
    "SharePointConfiguration": "Provides the configuration information to connect to Microsoft SharePoint as your data source.",
@@ -19816,8 +19902,8 @@
    "AccessControlListConfiguration": "Provides the path to the S3 bucket that contains the user context filtering files for the data source. For the format of the file, see [Access control for S3 data sources](https://docs.aws.amazon.com/kendra/latest/dg/s3-acl.html) .",
    "BucketName": "The name of the bucket that contains the documents.",
    "DocumentsMetadataConfiguration": "Specifies document metadata files that contain information such as the document access control information, source URI, document author, and custom attributes. Each metadata file contains metadata about a single document.",
-    "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for file names and file types that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* will exclude all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** will exclude all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** will exclude all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.",
-    "InclusionPatterns": "A list of glob patterns for documents that should be indexed. If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed.\n\nSome [examples](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) are:\n\n- **.txt* will include all text files in a directory (files with the extension .txt).\n- ***/*.txt* will include all text files in a directory and its subdirectories.\n- **tax** will include all files in a directory that contain 'tax' in the file name, such as 'tax', 'taxes', 'income_tax'.",
+    "ExclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.",
+    "InclusionPatterns": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.",
    "InclusionPrefixes": "A list of S3 prefixes for the documents that should be included in the index."
    },
    "AWS::Kendra::DataSource S3Path": {
@@ -20558,10 +20644,12 @@
    "BufferingHints": "The buffering option.",
    "CloudWatchLoggingOptions": "The Amazon CloudWatch logging options for your delivery stream.",
    "CompressionFormat": "The compression format. If no value is specified, the default is `UNCOMPRESSED` .",
+    "CustomTimeZone": "The time zone you prefer. UTC is the default.",
    "DataFormatConversionConfiguration": "The serializer, deserializer, and schema for converting data from the JSON format to the Parquet or ORC format before writing it to Amazon S3.",
    "DynamicPartitioningConfiguration": "The configuration of the dynamic partitioning mechanism that creates targeted data sets from the streaming data by partitioning it based on partition keys.",
    "EncryptionConfiguration": "The encryption configuration for the Kinesis Data Firehose delivery stream. The default value is `NoEncryption` .",
    "ErrorOutputPrefix": "A prefix that Kinesis Data Firehose evaluates and adds to failed records before writing them to S3. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see [Custom Prefixes for Amazon S3 Objects](https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html) .",
+    "FileExtension": "Specify a file extension. It will override the default file extension.",
    "Prefix": "The `YYYY/MM/DD/HH` time format prefix is automatically used for delivered Amazon S3 files. For more information, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .",
    "ProcessingConfiguration": "The data processing configuration for the Kinesis Data Firehose delivery stream.",
    "RoleARN": "The Amazon Resource Name (ARN) of the AWS credentials. 
For constraints, see [ExtendedS3DestinationConfiguration](https://docs.aws.amazon.com/firehose/latest/APIReference/API_ExtendedS3DestinationConfiguration.html) in the *Amazon Kinesis Data Firehose API Reference* .", @@ -21790,7 +21878,7 @@ "ContainerServiceDeployment": "An object that describes the current container deployment of the container service.", "IsDisabled": "A Boolean value indicating whether the container service is disabled.", "Power": "The power specification of the container service.\n\nThe power specifies the amount of RAM, the number of vCPUs, and the base price of the container service.", - "PrivateRegistryAccess": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", + "PrivateRegistryAccess": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/lightsail/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", "PublicDomainNames": "The public domain name of the container service, such as `example.com` and `www.example.com` .\n\nYou can specify up to four public domain names for a container service. The domain names that you specify are used when you create a deployment with a container that is configured as the public endpoint of your container service.\n\nIf you don't specify public domain names, then you can use the default domain of the container service.\n\n> You must create and validate an SSL/TLS certificate before you can use public domain names with your container service. Use the [AWS::Lightsail::Certificate](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lightsail-certificate.html) resource to create a certificate for the public domain names that you want to use with your container service.", "Scale": "The scale specification of the container service.\n\nThe scale specifies the allocated compute nodes of the container service.", "ServiceName": "The name of the container service.", @@ -22131,7 +22219,7 @@ "TrackerName": "The name for the tracker resource.\n\nRequirements:\n\n- Contain only alphanumeric characters (A-Z, a-z, 0-9) , hyphens (-), periods (.), and underscores (_).\n- Must be a unique tracker resource name.\n- No spaces allowed. For example, `ExampleTracker` ." }, "AWS::Logs::AccountPolicy": { - "PolicyDocument": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. 
For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.", + "PolicyDocument": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. 
This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with a `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.",
    "PolicyName": "A name for the policy. This must be unique within the account.",
    "PolicyType": "The type of policy that you're creating or updating.",
    "Scope": "Currently the only valid value for this parameter is `ALL` , which specifies that the policy applies to all log groups in the account. If you omit this parameter, the default of `ALL` is used. To scope down a subscription filter policy to a subset of log groups, use the `selectionCriteria` parameter.",
@@ -22148,7 +22236,7 @@
    },
    "AWS::Logs::DeliveryDestination": {
    "DeliveryDestinationPolicy": "A structure that contains information about one delivery destination policy.",
-    "DestinationResourceArn": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.",
+    "DestinationResourceArn": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.",
    "Name": "The name of this delivery destination.",
    "Tags": "The tags that have been assigned to this delivery destination."
    },
@@ -24906,6 +24994,28 @@
    "Key": "A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\").",
    "Value": "A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with `aws:` or `rds:` . The string can only contain the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: \"^([\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-]*)$\")."
    },
+    "AWS::NeptuneGraph::Graph": {
+    "DeletionProtection": "A value that indicates whether the graph has deletion protection enabled. The graph can't be deleted when deletion protection is enabled.",
+    "GraphName": "The graph name. For example: `my-graph-1` .\n\nThe name must contain from 1 to 63 letters, numbers, or hyphens, and its first character must be a letter. It cannot end with a hyphen or contain two consecutive hyphens.\n\nIf you don't specify a graph name, a unique graph name is generated for you using the prefix `graph-for` , followed by a combination of `Stack Name` and a `UUID` .",
+    "ProvisionedMemory": "The provisioned memory-optimized Neptune Capacity Units (m-NCUs) to use for the graph.\n\nMin = 128",
+    "PublicConnectivity": "Specifies whether or not the graph can be reachable over the internet. All access to graphs is IAM authenticated.\n\nWhen the graph is publicly available, its domain name system (DNS) endpoint resolves to the public IP address from the internet. When the graph isn't publicly available, you need to create a `PrivateGraphEndpoint` in a given VPC to ensure the DNS name resolves to a private IP address that is reachable from the VPC.\n\nDefault: If not specified, the default value is false.\n\n> If enabling public connectivity for the first time, there will be a delay while it is enabled.",
+    "ReplicaCount": "The number of replicas in other AZs.\n\nDefault: If not specified, the default value is 1.",
+    "Tags": "Adds metadata tags to the new graph. These tags can also be used with cost allocation reporting, or used in a Condition statement in an IAM policy.",
+    "VectorSearchConfiguration": "Specifies the number of dimensions for vector embeddings that will be loaded into the graph. The value is specified as `dimension=value` . Max = 65,535"
+    },
+    "AWS::NeptuneGraph::Graph Tag": {
+    "Key": "*Key* (string) \u2013 A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length. It can't be prefixed with `AWS :` and can only contain the set of Unicode characters specified by this Java regular expression: `\"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$\"` .",
+    "Value": "*Value* (string) \u2013 A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length. It can't be prefixed with `AWS :` and can only contain the set of Unicode characters specified by this Java regular expression: `\"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$\"` ."
+    },
+    "AWS::NeptuneGraph::Graph VectorSearchConfiguration": {
+    "VectorSearchDimension": "The number of dimensions."
+    },
+    "AWS::NeptuneGraph::PrivateGraphEndpoint": {
+    "GraphIdentifier": "The unique identifier of the Neptune Analytics graph.",
+    "SecurityGroupIds": "Security groups to be attached to the private graph endpoint.",
+    "SubnetIds": "Subnets in which private graph endpoint ENIs are created.",
+    "VpcId": "The VPC in which the private graph endpoint needs to be created."
+    },
    "AWS::NetworkFirewall::Firewall": {
    "DeleteProtection": "A flag indicating whether it is possible to delete the firewall. A setting of `TRUE` indicates that the firewall is protected against deletion. Use this setting to protect against accidentally deleting a firewall that is in use. When you create a firewall, the operation initializes this flag to `TRUE` .",
    "Description": "A description of the firewall.",
@@ -24987,8 +25097,8 @@
    "LoggingConfiguration": "Defines how AWS Network Firewall performs logging for a `Firewall` ."
    },
    "AWS::NetworkFirewall::LoggingConfiguration LogDestinationConfig": {
-    "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`",
-    "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.",
+    "LogDestination": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`",
+    "LogDestinationType": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.",
    "LogType": "The type of log to send. Alert logs report traffic that matches a stateful rule with an action setting that sends an alert log message. Flow logs are standard network traffic flow logs."
}, "AWS::NetworkFirewall::LoggingConfiguration LoggingConfiguration": { @@ -25608,7 +25718,7 @@ "AWS::OpenSearchServerless::Collection": { "Description": "A description of the collection.", "Name": "The name of the collection.\n\nCollection names must meet the following criteria:\n\n- Starts with a lowercase letter\n- Unique to your account and AWS Region\n- Contains between 3 and 28 characters\n- Contains only lowercase letters a-z, the numbers 0-9, and the hyphen (-)", - "StandbyReplicas": "Indicates whether standby replicas should be used for a collection.", + "StandbyReplicas": "Indicates whether to use standby replicas for the collection. You can't update this property after the collection is already created. If you attempt to modify this property, the collection continues to use the original value.", "Tags": "An arbitrary set of tags (key\u2013value pairs) to associate with the collection.\n\nFor more information, see [Tag](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html) .", "Type": "The type of collection. Possible values are `SEARCH` , `TIMESERIES` , and `VECTORSEARCH` . For more information, see [Choosing a collection type](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-overview.html#serverless-usecase) ." }, @@ -26682,7 +26792,7 @@ }, "AWS::Pinpoint::EventStream": { "ApplicationId": "The unique identifier for the Amazon Pinpoint application that you want to export data from.", - "DestinationStreamArn": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "DestinationStreamArn": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "RoleArn": "The AWS Identity and Access Management (IAM) role that authorizes Amazon Pinpoint to publish event data to the stream in your AWS account." }, "AWS::Pinpoint::GCMChannel": { @@ -27026,7 +27136,7 @@ "Filters": "The event patterns." }, "AWS::Pipes::Pipe FirehoseLogDestination": { - "DeliveryStreamArn": "The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records." + "DeliveryStreamArn": "The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records." }, "AWS::Pipes::Pipe MQBrokerAccessCredentials": { "BasicAuth": "The ARN of the Secrets Manager secret." 
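Assembling the `LogDestinationConfig` fields above, a minimal logging configuration might look like the following sketch. The `LogDestination` mappings reuse the `bucketName` / `prefix` and `deliveryStream` examples quoted in the descriptions; the `FirewallArn` reference, the `LogDestinationConfigs` list property, and the `ALERT` / `FLOW` / `S3` / `KinesisDataFirehose` values are assumed from the resource's usual shape rather than stated in this file:

```json
{
  "FirewallLogging": {
    "Type": "AWS::NetworkFirewall::LoggingConfiguration",
    "Properties": {
      "FirewallArn": { "Fn::GetAtt": ["MyFirewall", "FirewallArn"] },
      "LoggingConfiguration": {
        "LogDestinationConfigs": [
          {
            "LogType": "ALERT",
            "LogDestinationType": "S3",
            "LogDestination": { "bucketName": "DOC-EXAMPLE-BUCKET", "prefix": "alerts" }
          },
          {
            "LogType": "FLOW",
            "LogDestinationType": "KinesisDataFirehose",
            "LogDestination": { "deliveryStream": "alert-delivery-stream" }
          }
        ]
      }
    }
  }
}
```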
@@ -27049,7 +27159,7 @@ }, "AWS::Pipes::Pipe PipeLogConfiguration": { "CloudwatchLogsLogDestination": "The logging configuration settings for the pipe.", - "FirehoseLogDestination": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.", + "FirehoseLogDestination": "The Amazon Data Firehose logging configuration settings for the pipe.", "IncludeExecutionData": "Whether the execution data (specifically, the `payload` , `awsRequest` , and `awsResponse` fields) is included in the log messages for this pipe.\n\nThis applies to all log destinations for the pipe.\n\nFor more information, see [Including execution data in logs](https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data) in the *Amazon EventBridge User Guide* .\n\n*Allowed values:* `ALL`", "Level": "The level of logging detail to include. This applies to all log destinations for the pipe.", "S3LogDestination": "The Amazon S3 logging configuration settings for the pipe." @@ -29815,6 +29925,7 @@ "DashboardId": "The ID for the dashboard, also added to the IAM policy.", "DashboardPublishOptions": "Options for publishing the dashboard when you create it:\n\n- `AvailabilityStatus` for `AdHocFilteringOption` - This status can be either `ENABLED` or `DISABLED` . When this is set to `DISABLED` , Amazon QuickSight disables the left filter pane on the published dashboard, which can be used for ad hoc (one-time) filtering. This option is `ENABLED` by default.\n- `AvailabilityStatus` for `ExportToCSVOption` - This status can be either `ENABLED` or `DISABLED` . The visual option to export data to .CSV format isn't enabled when this is set to `DISABLED` . This option is `ENABLED` by default.\n- `VisibilityState` for `SheetControlsOption` - This visibility state can be either `COLLAPSED` or `EXPANDED` . This option is `COLLAPSED` by default.", "Definition": "", + "LinkEntities": "A list of analysis Amazon Resource Names (ARNs) to be linked to the dashboard.", "LinkSharingConfiguration": "A structure that contains the link sharing configurations that you want to apply overrides to.", "Name": "The display name of the dashboard.", "Parameters": "The parameters for the creation of the dashboard, which you want to use to override the default settings. A dashboard can have any type of parameters, and some parameters might accept multiple values.", @@ -35761,15 +35872,15 @@ "AutomaticBackupReplicationRegion": "The destination region for the backup replication of the DB instance. For more info, see [Replicating automated backups to another AWS Region](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReplicateBackups.html) in the *Amazon RDS User Guide* .", "AvailabilityZone": "The Availability Zone (AZ) where the database will be created. For information on AWS Regions and Availability Zones, see [Regions and Availability Zones](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html) .\n\nFor Amazon Aurora, each Aurora DB cluster hosts copies of its storage in three separate Availability Zones. Specify one of these Availability Zones. 
Aurora automatically chooses an appropriate Availability Zone if you don't specify one.\n\nDefault: A random, system-chosen Availability Zone in the endpoint's AWS Region .\n\nConstraints:\n\n- The `AvailabilityZone` parameter can't be specified if the DB instance is a Multi-AZ deployment.\n- The specified Availability Zone must be in the same AWS Region as the current endpoint.\n\nExample: `us-east-1d`", "BackupRetentionPeriod": "The number of days for which automated backups are retained. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.\n\n*Amazon Aurora*\n\nNot applicable. The retention period for automated backups is managed by the DB cluster.\n\nDefault: 1\n\nConstraints:\n\n- Must be a value from 0 to 35\n- Can't be set to 0 if the DB instance is a source to read replicas", - "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nSpecifying or updating this property triggers a reboot. For more information about CA certificate identifiers for RDS DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide* . For more information about CA certificate identifiers for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .", + "CACertificateIdentifier": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", "CertificateDetails": "The details of the DB instance's server certificate.", "CertificateRotationRestart": "Specifies whether the DB instance is restarted when you rotate your SSL/TLS certificate.\n\nBy default, the DB instance is restarted when you rotate your SSL/TLS certificate. The certificate is not updated until the DB instance is restarted.\n\n> Set this parameter only if you are *not* using SSL/TLS to connect to the DB instance. \n\nIf you are using SSL/TLS to connect to the DB instance, follow the appropriate instructions for your DB engine to rotate your SSL/TLS certificate:\n\n- For more information about rotating your SSL/TLS certificate for RDS DB engines, see [Rotating Your SSL/TLS Certificate.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide.*\n- For more information about rotating your SSL/TLS certificate for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .\n\nThis setting doesn't apply to RDS Custom DB instances.", "CharacterSetName": "For supported engines, indicates that the DB instance should be associated with the specified character set.\n\n*Amazon Aurora*\n\nNot applicable. The character set is managed by the DB cluster. 
For more information, see [AWS::RDS::DBCluster](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbcluster.html) .", "CopyTagsToSnapshot": "Specifies whether to copy tags from the DB instance to snapshots of the DB instance. By default, tags are not copied.\n\nThis setting doesn't apply to Amazon Aurora DB instances. Copying tags to snapshots is managed by the DB cluster. Setting this value for an Aurora DB instance has no effect on the DB cluster setting.", "CustomIAMInstanceProfile": "The instance profile associated with the underlying Amazon EC2 instance of an RDS Custom DB instance.\n\nThis setting is required for RDS Custom.\n\nConstraints:\n\n- The profile must exist in your account.\n- The profile must have an IAM role that Amazon EC2 has permissions to assume.\n- The instance profile name and the associated IAM role name must start with the prefix `AWSRDSCustom` .\n\nFor the list of permissions required for the IAM role, see [Configure IAM and your VPC](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/custom-setup-orcl.html#custom-setup-orcl.iam-vpc) in the *Amazon RDS User Guide* .", "DBClusterIdentifier": "The identifier of the DB cluster that the instance will belong to.", - "DBClusterSnapshotIdentifier": "The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.\n- Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot.", - "DBInstanceClass": "The compute and memory capacity of the DB instance, for example, `db.m4.large` . Not all DB instance classes are available in all AWS Regions, or for all database engines.\n\nFor the full list of DB instance classes, and availability for your engine, see [DB Instance Class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide.* For more information about DB instance class pricing and AWS Region support for DB instance classes, see [Amazon RDS Pricing](https://docs.aws.amazon.com/rds/pricing/) .", + "DBClusterSnapshotIdentifier": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", + "DBInstanceClass": "The compute and memory capacity of the DB instance, for example `db.m5.large` . 
Not all DB instance classes are available in all AWS Regions , or for all database engines. For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "DBInstanceIdentifier": "A name for the DB instance. If you specify a name, AWS CloudFormation converts it to lowercase. If you don't specify a name, AWS CloudFormation generates a unique physical ID and uses that ID for the DB instance. For more information, see [Name Type](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-name.html) .\n\nFor information about constraints that apply to DB instance identifiers, see [Naming constraints in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Limits.html#RDS_Limits.Constraints) in the *Amazon RDS User Guide* .\n\n> If you specify a name, you can't perform updates that require replacement of this resource. You can perform updates that require no or some interruption. If you must replace the resource, specify a new name.", "DBName": "The meaning of this parameter differs according to the database engine you use.\n\n> If you specify the `[DBSnapshotIdentifier](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-database-instance.html#cfn-rds-dbinstance-dbsnapshotidentifier)` property, this property only applies to RDS for Oracle. \n\n*Amazon Aurora*\n\nNot applicable. The database name is managed by the DB cluster.\n\n*Db2*\n\nThe name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Can't be a word reserved by the specified database engine.\n\n*MySQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*MariaDB*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance.\n\nConstraints:\n\n- Must contain 1 to 64 letters or numbers.\n- Can't be a word reserved by the specified database engine\n\n*PostgreSQL*\n\nThe name of the database to create when the DB instance is created. If this parameter is not specified, the default `postgres` database is created in the DB instance.\n\nConstraints:\n\n- Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9).\n- Must contain 1 to 63 characters.\n- Can't be a word reserved by the specified database engine\n\n*Oracle*\n\nThe Oracle System ID (SID) of the created DB instance. If you specify `null` , the default value `ORCL` is used. You can't specify the string NULL, or any other reserved word, for `DBName` .\n\nDefault: `ORCL`\n\nConstraints:\n\n- Can't be longer than 8 characters\n\n*SQL Server*\n\nNot applicable. 
Must be null.", "DBParameterGroupName": "The name of an existing DB parameter group or a reference to an [AWS::RDS::DBParameterGroup](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-rds-dbparametergroup.html) resource created in the template.\n\nTo list all of the available DB parameter group names, use the following command:\n\n`aws rds describe-db-parameter-groups --query \"DBParameterGroups[].DBParameterGroupName\" --output text`\n\n> If any of the data members of the referenced parameter group are changed during an update, the DB instance might need to be restarted, which causes some interruption. If the parameter group contains static parameters, whether they were changed or not, an update triggers a reboot. \n\nIf you don't specify a value for `DBParameterGroupName` property, the default DB parameter group for the specified engine and engine version is used.", @@ -36221,7 +36332,7 @@ "ResumeCluster": "An action that runs a `ResumeCluster` API operation." }, "AWS::RedshiftServerless::Namespace": { - "AdminPasswordSecretKmsKeyId": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret.", + "AdminPasswordSecretKmsKeyId": "The ID of the AWS Key Management Service (KMS) key used to encrypt and store the namespace's admin credentials secret. You can only use this parameter if `ManageAdminPassword` is `true` .", "AdminUserPassword": "The password of the administrator for the primary database created in the namespace.", "AdminUsername": "The username of the administrator for the primary database created in the namespace.", "DbName": "The name of the primary database created in the namespace.", @@ -36231,10 +36342,11 @@ "IamRoles": "A list of IAM roles to associate with the namespace.", "KmsKeyId": "The ID of the AWS Key Management Service key used to encrypt your data.", "LogExports": "The types of logs the namespace can export. Available export types are `userlog` , `connectionlog` , and `useractivitylog` .", - "ManageAdminPassword": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials.", + "ManageAdminPassword": "If true, Amazon Redshift uses AWS Secrets Manager to manage the namespace's admin credentials. You can't use `AdminUserPassword` if `ManageAdminPassword` is true. If `ManageAdminPassword` is `false` or not set, Amazon Redshift uses `AdminUserPassword` for the admin user account's password.", "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", - "NamespaceResourcePolicy": "The resource policy object. Currently, you can use policies to share snapshots across AWS accounts.", + "NamespaceResourcePolicy": "The resource policy that will be attached to the namespace.", "RedshiftIdcApplicationArn": "The ARN for the Redshift application that integrates with IAM Identity Center.", + "SnapshotCopyConfigurations": "", "Tags": "The map of the key-value pairs used to tag the namespace." }, "AWS::RedshiftServerless::Namespace Namespace": { @@ -36252,6 +36364,11 @@ "NamespaceName": "The name of the namespace. Must be between 3-64 alphanumeric characters in lowercase, and it cannot be a reserved word. 
A list of reserved words can be found in [Reserved Words](https://docs.aws.amazon.com//redshift/latest/dg/r_pg_keywords.html) in the Amazon Redshift Database Developer Guide.", "Status": "The status of the namespace." }, + "AWS::RedshiftServerless::Namespace SnapshotCopyConfiguration": { + "DestinationKmsKeyId": "The ID of the KMS key to use to encrypt your snapshots in the destination AWS Region .", + "DestinationRegion": "The destination AWS Region to copy snapshots to.", + "SnapshotRetentionPeriod": "The retention period of snapshots that are copied to the destination AWS Region ." + }, "AWS::RedshiftServerless::Namespace Tag": { "Key": "The key to use in the tag.", "Value": "The value of the tag." @@ -36260,6 +36377,7 @@ "BaseCapacity": "The base compute capacity of the workgroup in Redshift Processing Units (RPUs).", "ConfigParameters": "A list of parameters to set for finer control over a database. Available options are `datestyle` , `enable_user_activity_logging` , `query_group` , `search_path` , `max_query_execution_time` , and `require_ssl` .", "EnhancedVpcRouting": "The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.", + "MaxCapacity": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", "NamespaceName": "The namespace the workgroup is associated with.", "Port": "The custom port to use when connecting to a workgroup. Valid port ranges are 5431-5455 and 8191-8215. The default is 5439.", "PubliclyAccessible": "A value that specifies whether the workgroup can be accessible from a public network.", @@ -36298,6 +36416,7 @@ "CreationDate": "The creation date of the workgroup.", "Endpoint": "The endpoint that is created from the workgroup.", "EnhancedVpcRouting": "The value that specifies whether to enable enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC.", + "MaxCapacity": "The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs.", "NamespaceName": "The namespace the workgroup is associated with.", "PubliclyAccessible": "A value that specifies whether the workgroup can be accessible from a public network", "SecurityGroupIds": "An array of security group IDs to associate with the workgroup.", @@ -36730,6 +36849,7 @@ "Comment": "*Optional:* Any comments you want to include about a change batch request.", "Failover": "*Failover resource record sets only:* To configure failover, you add the `Failover` element to two resource record sets. For one resource record set, you specify `PRIMARY` as the value for `Failover` ; for the other resource record set, you specify `SECONDARY` . 
In addition, you include the `HealthCheckId` element and specify the health check that you want Amazon Route 53 to perform for each resource record set.\n\nExcept where noted, the following failover behaviors assume that you have included the `HealthCheckId` element in both resource record sets:\n\n- When the primary resource record set is healthy, Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.\n- When the primary resource record set is unhealthy and the secondary resource record set is healthy, Route 53 responds to DNS queries with the applicable value from the secondary resource record set.\n- When the secondary resource record set is unhealthy, Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.\n- If you omit the `HealthCheckId` element for the secondary resource record set, and if the primary resource record set is unhealthy, Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.\n\nYou can't create non-failover resource record sets that have the same values for the `Name` and `Type` elements as failover resource record sets.\n\nFor failover alias resource record sets, you must also include the `EvaluateTargetHealth` element and set the value to true.\n\nFor more information about configuring failover for Route 53, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)", "GeoLocation": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . 
Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", + "GeoProximityLocation": "*GeoproximityLocation resource record sets only:* A complex type that lets you control how Route\u00a053 responds to DNS queries based on the geographic origin of the query and your resources.", "HealthCheckId": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set for which the value of `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "HostedZoneId": "The ID of the hosted zone that you want to create records in.\n\nSpecify either `HostedZoneName` or `HostedZoneId` , but not both. If you have multiple hosted zones with the same domain name, you must specify the hosted zone using `HostedZoneId` .", "HostedZoneName": "The name of the hosted zone that you want to create records in. You must include a trailing dot (for example, `www.example.com.` ) as part of the `HostedZoneName` .\n\nWhen you create a stack using an AWS::Route53::RecordSet that specifies `HostedZoneName` , AWS CloudFormation attempts to find a hosted zone whose name matches the HostedZoneName. If AWS CloudFormation cannot find a hosted zone with a matching domain name, or if there is more than one hosted zone with the specified domain name, AWS CloudFormation will not create the stack.\n\nSpecify either `HostedZoneName` or `HostedZoneId` , but not both. If you have multiple hosted zones with the same domain name, you must specify the hosted zone using `HostedZoneId` .", @@ -36751,11 +36871,21 @@ "CollectionId": "The CIDR collection ID.", "LocationName": "The CIDR collection location name." }, + "AWS::Route53::RecordSet Coordinates": { + "Latitude": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "Longitude": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180)." + }, "AWS::Route53::RecordSet GeoLocation": { "ContinentCode": "For geolocation resource record sets, a two-letter abbreviation that identifies a continent. Route 53 supports the following continent codes:\n\n- *AF* : Africa\n- *AN* : Antarctica\n- *AS* : Asia\n- *EU* : Europe\n- *OC* : Oceania\n- *NA* : North America\n- *SA* : South America\n\nConstraint: Specifying `ContinentCode` with either `CountryCode` or `SubdivisionCode` returns an `InvalidInput` error.", "CountryCode": "For geolocation resource record sets, the two-letter code for a country.\n\nRoute 53 uses the two-letter country codes that are specified in [ISO standard 3166-1 alpha-2](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) .", "SubdivisionCode": "For geolocation resource record sets, the two-letter code for a state of the United States. Route 53 doesn't support any other values for `SubdivisionCode` . For a list of state abbreviations, see [Appendix B: Two\u2013Letter State and Possession Abbreviations](https://docs.aws.amazon.com/https://pe.usps.com/text/pub28/28apb.htm) on the United States Postal Service website.\n\nIf you specify `subdivisioncode` , you must also specify `US` for `CountryCode` ." 
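Read together, the `GeoLocation` constraints above suggest pairing one or more specific locations with a `*` catch-all record. The following is a minimal CloudFormation sketch of that pattern; the hosted zone ID, record name, and IP addresses are placeholder values, not anything defined by this schema:

Resources:
  AfricaRecord:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneId: Z0000000EXAMPLE      # placeholder zone ID
      Name: www.example.com.
      Type: A
      TTL: "300"
      SetIdentifier: geo-africa          # required for geolocation records
      GeoLocation:
        ContinentCode: AF                # queries originating in Africa
      ResourceRecords:
        - 192.0.2.111
  DefaultRecord:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneId: Z0000000EXAMPLE
      Name: www.example.com.
      Type: A
      TTL: "300"
      SetIdentifier: geo-default
      GeoLocation:
        CountryCode: "*"                 # unmapped IPs and uncovered locations
      ResourceRecords:
        - 192.0.2.222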
}, + "AWS::Route53::RecordSet GeoProximityLocation": { + "AWSRegion": "The AWS Region the resource you are directing DNS traffic to, is in.", + "Bias": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "Coordinates": "Contains the longitude and latitude for a geographic region.", + "LocalZoneGroup": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` ." + }, "AWS::Route53::RecordSetGroup": { "Comment": "*Optional:* Any comments you want to include about a change batch request.", "HostedZoneId": "The ID of the hosted zone that you want to create records in.\n\nSpecify either `HostedZoneName` or `HostedZoneId` , but not both. If you have multiple hosted zones with the same domain name, you must specify the hosted zone using `HostedZoneId` .", @@ -36771,16 +36901,27 @@ "CollectionId": "The CIDR collection ID.", "LocationName": "The CIDR collection location name." }, + "AWS::Route53::RecordSetGroup Coordinates": { + "Latitude": "Specifies a coordinate of the north\u2013south position of a geographic point on the surface of the Earth (-90 - 90).", + "Longitude": "Specifies a coordinate of the east\u2013west position of a geographic point on the surface of the Earth (-180 - 180)." + }, "AWS::Route53::RecordSetGroup GeoLocation": { "ContinentCode": "For geolocation resource record sets, a two-letter abbreviation that identifies a continent. Route 53 supports the following continent codes:\n\n- *AF* : Africa\n- *AN* : Antarctica\n- *AS* : Asia\n- *EU* : Europe\n- *OC* : Oceania\n- *NA* : North America\n- *SA* : South America\n\nConstraint: Specifying `ContinentCode` with either `CountryCode` or `SubdivisionCode` returns an `InvalidInput` error.", "CountryCode": "For geolocation resource record sets, the two-letter code for a country.\n\nRoute 53 uses the two-letter country codes that are specified in [ISO standard 3166-1 alpha-2](https://docs.aws.amazon.com/https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2) .", "SubdivisionCode": "For geolocation resource record sets, the two-letter code for a state of the United States. Route 53 doesn't support any other values for `SubdivisionCode` . For a list of state abbreviations, see [Appendix B: Two\u2013Letter State and Possession Abbreviations](https://docs.aws.amazon.com/https://pe.usps.com/text/pub28/28apb.htm) on the United States Postal Service website.\n\nIf you specify `subdivisioncode` , you must also specify `US` for `CountryCode` ." 
}, + "AWS::Route53::RecordSetGroup GeoProximityLocation": { + "AWSRegion": "The AWS Region the resource you are directing DNS traffic to, is in.", + "Bias": "The bias increases or decreases the size of the geographic region from which Route\u00a053 routes traffic to a resource.\n\nTo use `Bias` to change the size of the geographic region, specify the applicable value for the bias:\n\n- To expand the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a positive integer from 1 to 99 for the bias. Route\u00a053 shrinks the size of adjacent regions.\n- To shrink the size of the geographic region from which Route\u00a053 routes traffic to a resource, specify a negative bias of -1 to -99. Route\u00a053 expands the size of adjacent regions.", + "Coordinates": "Contains the longitude and latitude for a geographic region.", + "LocalZoneGroup": "Specifies an AWS Local Zone Group.\n\nA local Zone Group is usually the Local Zone code without the ending character. For example, if the Local Zone is `us-east-1-bue-1a` the Local Zone Group is `us-east-1-bue-1` .\n\nYou can identify the Local Zones Group for a specific Local Zone by using the [describe-availability-zones](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-availability-zones.html) CLI command:\n\nThis command returns: `\"GroupName\": \"us-west-2-den-1\"` , specifying that the Local Zone `us-west-2-den-1a` belongs to the Local Zone Group `us-west-2-den-1` ." + }, "AWS::Route53::RecordSetGroup RecordSet": { "AliasTarget": "*Alias resource record sets only:* Information about the AWS resource, such as a CloudFront distribution or an Amazon S3 bucket, that you want to route traffic to.\n\nIf you're creating resource records sets for a private hosted zone, note the following:\n\n- You can't create an alias resource record set in a private hosted zone to route traffic to a CloudFront distribution.\n- For information about creating failover resource record sets in a private hosted zone, see [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) in the *Amazon Route 53 Developer Guide* .", "CidrRoutingConfig": "", "Failover": "*Failover resource record sets only:* To configure failover, you add the `Failover` element to two resource record sets. For one resource record set, you specify `PRIMARY` as the value for `Failover` ; for the other resource record set, you specify `SECONDARY` . 
In addition, you include the `HealthCheckId` element and specify the health check that you want Amazon Route 53 to perform for each resource record set.\n\nExcept where noted, the following failover behaviors assume that you have included the `HealthCheckId` element in both resource record sets:\n\n- When the primary resource record set is healthy, Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the secondary resource record set.\n- When the primary resource record set is unhealthy and the secondary resource record set is healthy, Route 53 responds to DNS queries with the applicable value from the secondary resource record set.\n- When the secondary resource record set is unhealthy, Route 53 responds to DNS queries with the applicable value from the primary resource record set regardless of the health of the primary resource record set.\n- If you omit the `HealthCheckId` element for the secondary resource record set, and if the primary resource record set is unhealthy, Route 53 always responds to DNS queries with the applicable value from the secondary resource record set. This is true regardless of the health of the associated endpoint.\n\nYou can't create non-failover resource record sets that have the same values for the `Name` and `Type` elements as failover resource record sets.\n\nFor failover alias resource record sets, you must also include the `EvaluateTargetHealth` element and set the value to true.\n\nFor more information about configuring failover for Route 53, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)", "GeoLocation": "*Geolocation resource record sets only:* A complex type that lets you control how Amazon Route 53 responds to DNS queries based on the geographic origin of the query. For example, if you want all queries from Africa to be routed to a web server with an IP address of `192.0.2.111` , create a resource record set with a `Type` of `A` and a `ContinentCode` of `AF` .\n\nIf you create separate resource record sets for overlapping geographic regions (for example, one resource record set for a continent and one for a country on the same continent), priority goes to the smallest geographic region. This allows you to route most queries for a continent to one resource and to route queries for a country on that continent to a different resource.\n\nYou can't create two geolocation resource record sets that specify the same geographic location.\n\nThe value `*` in the `CountryCode` element matches all geographic locations that aren't specified in other geolocation resource record sets that have the same values for the `Name` and `Type` elements.\n\n> Geolocation works by mapping IP addresses to locations. However, some IP addresses aren't mapped to geographic locations, so even if you create geolocation resource record sets that cover all seven continents, Route 53 will receive some DNS queries from locations that it can't identify. We recommend that you create a resource record set for which the value of `CountryCode` is `*` . 
Two groups of queries are routed to the resource that you specify in this record: queries that come from locations for which you haven't created geolocation resource record sets and queries from IP addresses that aren't mapped to a location. If you don't create a `*` resource record set, Route 53 returns a \"no answer\" response for queries from those locations. \n\nYou can't create non-geolocation resource record sets that have the same values for the `Name` and `Type` elements as geolocation resource record sets.", + "GeoProximityLocation": "A complex type that contains information about a geographic location.", "HealthCheckId": "If you want Amazon Route 53 to return this resource record set in response to a DNS query only when the status of a health check is healthy, include the `HealthCheckId` element and specify the ID of the applicable health check.\n\nRoute 53 determines whether a resource record set is healthy based on one of the following:\n\n- By periodically sending a request to the endpoint that is specified in the health check\n- By aggregating the status of a specified group of health checks (calculated health checks)\n- By determining the current state of a CloudWatch alarm (CloudWatch metric health checks)\n\n> Route 53 doesn't check the health of the endpoint that is specified in the resource record set, for example, the endpoint specified by the IP address in the `Value` element. When you add a `HealthCheckId` element to a resource record set, Route 53 checks the health of the endpoint that you specified in the health check. \n\nFor more information, see the following topics in the *Amazon Route 53 Developer Guide* :\n\n- [How Amazon Route 53 Determines Whether an Endpoint Is Healthy](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html)\n- [Route 53 Health Checks and DNS Failover](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html)\n- [Configuring Failover in a Private Hosted Zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html)\n\n*When to Specify HealthCheckId*\n\nSpecifying a value for `HealthCheckId` is useful only when Route 53 is choosing between two or more resource record sets to respond to a DNS query, and you want Route 53 to base the choice in part on the status of a health check. 
Configuring health checks makes sense only in the following configurations:\n\n- *Non-alias resource record sets* : You're checking the health of a group of non-alias resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A) and you specify health check IDs for all the resource record sets.\n\nIf the health check status for a resource record set is healthy, Route 53 includes the record among the records that it responds to DNS queries with.\n\nIf the health check status for a resource record set is unhealthy, Route 53 stops responding to DNS queries using the value for that resource record set.\n\nIf the health check status for all resource record sets in the group is unhealthy, Route 53 considers all resource record sets in the group healthy and responds to DNS queries accordingly.\n- *Alias resource record sets* : You specify the following settings:\n\n- You set `EvaluateTargetHealth` to true for an alias resource record set in a group of resource record sets that have the same routing policy, name, and type (such as multiple weighted records named www.example.com with a type of A).\n- You configure the alias resource record set to route traffic to a non-alias resource record set in the same hosted zone.\n- You specify a health check ID for the non-alias resource record set.\n\nIf the health check status is healthy, Route 53 considers the alias resource record set to be healthy and includes the alias record among the records that it responds to DNS queries with.\n\nIf the health check status is unhealthy, Route 53 stops responding to DNS queries using the alias resource record set.\n\n> The alias resource record set can also route traffic to a *group* of non-alias resource record sets that have the same routing policy, name, and type. In that configuration, associate health checks with all of the resource record sets in the group of non-alias resource record sets.\n\n*Geolocation Routing*\n\nFor geolocation resource record sets, if an endpoint is unhealthy, Route 53 looks for a resource record set for the larger, associated geographic region. For example, suppose you have resource record sets for a state in the United States, for the entire United States, for North America, and a resource record set for which the value of `CountryCode` is `*` , which applies to all locations. If the endpoint for the state resource record set is unhealthy, Route 53 checks for healthy resource record sets in the following order until it finds a resource record set for which the endpoint is healthy:\n\n- The United States\n- North America\n- The default resource record set\n\n*Specifying the Health Check Endpoint by Domain Name*\n\nIf your health checks specify the endpoint only by domain name, we recommend that you create a separate health check for each endpoint. For example, create a health check for each `HTTP` server that is serving content for `www.example.com` . 
For the value of `FullyQualifiedDomainName` , specify the domain name of the server (such as `us-east-2-www.example.com` ), not the name of the resource record sets ( `www.example.com` ).\n\n> Health check results will be unpredictable if you do the following:\n> \n> - Create a health check that has the same value for `FullyQualifiedDomainName` as the name of a resource record set.\n> - Associate that health check with the resource record set.", "HostedZoneId": "The ID of the hosted zone that you want to create records in.\n\nSpecify either `HostedZoneName` or `HostedZoneId` , but not both. If you have multiple hosted zones with the same domain name, you must specify the hosted zone using `HostedZoneId` .\n\nDo not provide the `HostedZoneId` if it is already defined in `AWS::Route53::RecordSetGroup` . The creation fails if `HostedZoneId` is defined in both.", "HostedZoneName": "The name of the hosted zone that you want to create records in. You must include a trailing dot (for example, `www.example.com.` ) as part of the `HostedZoneName` .\n\nWhen you create a stack using an `AWS::Route53::RecordSet` that specifies `HostedZoneName` , AWS CloudFormation attempts to find a hosted zone whose name matches the `HostedZoneName` . If AWS CloudFormation can't find a hosted zone with a matching domain name, or if there is more than one hosted zone with the specified domain name, AWS CloudFormation will not create the stack.\n\nSpecify either `HostedZoneName` or `HostedZoneId` , but not both. If you have multiple hosted zones with the same domain name, you must specify the hosted zone using `HostedZoneId` .", @@ -37846,7 +37987,7 @@ "RedrivePolicy": "When specified, sends undeliverable messages to the specified Amazon SQS dead-letter queue. Messages that can't be delivered due to client errors (for example, when the subscribed endpoint is unreachable) or server errors (for example, when the service that powers the subscribed endpoint becomes unavailable) are held in the dead-letter queue for further analysis or reprocessing.\n\nFor more information about the redrive policy and dead-letter queues, see [Amazon SQS dead-letter queues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) in the *Amazon SQS Developer Guide* .", "Region": "For cross-region subscriptions, the region in which the topic resides.\n\nIf no region is specified, AWS CloudFormation uses the region of the caller as the default.\n\nIf you perform an update operation that only updates the `Region` property of a `AWS::SNS::Subscription` resource, that operation will fail unless you are either:\n\n- Updating the `Region` from `NULL` to the caller region.\n- Updating the `Region` from the caller region to `NULL` .", "ReplayPolicy": "", - "SubscriptionRoleArn": "This property applies only to Amazon Kinesis Data Firehose delivery stream subscriptions. Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Kinesis Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Kinesis Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", + "SubscriptionRoleArn": "This property applies only to Amazon Data Firehose delivery stream subscriptions. 
Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", "TopicArn": "The ARN of the topic to subscribe to." }, "AWS::SNS::Topic": { @@ -37927,11 +38068,11 @@ "MaxErrors": "The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set `MaxErrors` to 10%, then the system stops sending the request when the sixth error is received.\n\nExecutions that are already running an association when `MaxErrors` is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set `MaxConcurrency` to 1 so that executions proceed one at a time.", "Name": "The name of the SSM document that contains the configuration information for the instance. You can specify `Command` or `Automation` documents. The documents can be AWS-predefined documents, documents you created, or a document that is shared with you from another account. For SSM documents that are shared with you from other AWS accounts , you must specify the complete SSM document ARN, in the following format:\n\n`arn:partition:ssm:region:account-id:document/document-name`\n\nFor example: `arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document`\n\nFor AWS-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, `AWS-ApplyPatchBaseline` or `My-Document` .", "OutputLocation": "An Amazon Simple Storage Service (Amazon S3) bucket where you want to store the output details of the request.", - "ParameterValues": "A description of the parameters for a document.", + "Parameters": "The parameters for the runtime configuration of the document.", "ScheduleExpression": "A cron expression that specifies a schedule when the association runs. The schedule runs in Coordinated Universal Time (UTC).", "ScheduleOffset": "Number of days to wait after the scheduled day to run an association.", - "SyncCompliance": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.\n\nBy default, all associations use `AUTO` mode.", - "Targets": "The targets for the association. You must specify the `InstanceId` or `Targets` property. 
You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` . To view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", + "SyncCompliance": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the `PutComplianceItems` API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the `PutComplianceItems` API action.\n\nBy default, all associations use `AUTO` mode.", + "Targets": "The targets for the association. You must specify the `InstanceId` or `Targets` property. You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` .\n\nSupported formats include the following.\n\n- `Key=InstanceIds,Values=,,`\n- `Key=tag-key,Values=,`\n\nTo view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", "WaitForSuccessTimeoutSeconds": "The number of seconds the service should wait for the association status to show \"Success\" before proceeding with the stack execution. If the association status doesn't show \"Success\" after the specified number of seconds, then stack creation fails.\n\n> When you specify a value for the `WaitForSuccessTimeoutSeconds` , [drift detection](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-stack-drift.html) for your AWS CloudFormation stack\u2019s configuration might yield inaccurate results. If drift detection is important in your scenario, we recommend that you don\u2019t include `WaitForSuccessTimeoutSeconds` in your template." }, "AWS::SSM::Association InstanceAssociationOutputLocation": { @@ -37949,7 +38090,7 @@ "AWS::SSM::Document": { "Attachments": "A list of key-value pairs that describe attachments to a version of a document.", "Content": "The content for the new SSM document in JSON or YAML. For more information about the schemas for SSM document content, see [SSM document schema features and examples](https://docs.aws.amazon.com/systems-manager/latest/userguide/document-schemas-features.html) in the *AWS Systems Manager User Guide* .\n\n> This parameter also supports `String` data types.", - "DocumentFormat": "Specify the document format for the request. JSON is the default format.", + "DocumentFormat": "Specify the document format for the request. `JSON` is the default format.", "DocumentType": "The type of document to create.", "Name": "A name for the SSM document.\n\n> You can't use the following strings as document name prefixes. These are reserved by AWS for use as document name prefixes:\n> \n> - `aws`\n> - `amazon`\n> - `amzn`", "Requires": "A list of SSM documents required by a document. This parameter is used exclusively by AWS AppConfig . When a user creates an AWS AppConfig configuration in an SSM document, the user must also specify a required document for validation purposes. 
In this case, an `ApplicationConfiguration` document requires an `ApplicationConfigurationSchema` document for validation purposes. For more information, see [What is AWS AppConfig ?](https://docs.aws.amazon.com/appconfig/latest/userguide/what-is-appconfig.html) in the *AWS AppConfig User Guide* .", @@ -37981,7 +38122,7 @@ "Schedule": "The schedule of the maintenance window in the form of a cron or rate expression.", "ScheduleOffset": "The number of days to wait to run a maintenance window after the scheduled cron expression date and time.", "ScheduleTimezone": "The time zone that the scheduled maintenance window executions are based on, in Internet Assigned Numbers Authority (IANA) format.", - "StartDate": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. StartDate allows you to delay activation of the Maintenance Window until the specified future date.", + "StartDate": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. `StartDate` allows you to delay activation of the maintenance window until the specified future date.", "Tags": "Optional metadata that you assign to a resource in the form of an arbitrary set of tags (key-value pairs). Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a maintenance window to identify the type of tasks it will run, the types of targets, and the environment it will run in." }, "AWS::SSM::MaintenanceWindow Tag": { @@ -38027,7 +38168,7 @@ }, "AWS::SSM::MaintenanceWindowTask MaintenanceWindowAutomationParameters": { "DocumentVersion": "The version of an Automation runbook to use during task execution.", - "Parameters": "The parameters for the AUTOMATION task." + "Parameters": "The parameters for the `AUTOMATION` type task." }, "AWS::SSM::MaintenanceWindowTask MaintenanceWindowLambdaParameters": { "ClientContext": "Client-specific information to pass to the AWS Lambda function that you're invoking. You can then use the `context` variable to process the client information in your AWS Lambda function.", @@ -38067,14 +38208,14 @@ "MaintenanceWindowStepFunctionsParameters": "The parameters for a `STEP_FUNCTIONS` task type." }, "AWS::SSM::Parameter": { - "AllowedPattern": "A regular expression used to validate the parameter value. For example, for String types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", + "AllowedPattern": "A regular expression used to validate the parameter value. For example, for `String` types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "DataType": "The data type of the parameter, such as `text` or `aws:ec2:image` . The default is `text` .", "Description": "Information about the parameter.", - "Name": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", + "Name": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. 
The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", "Policies": "Information about the policies assigned to a parameter.\n\nFor more information, see [Assigning parameter policies](https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) in the *AWS Systems Manager User Guide* .", "Tags": "Optional metadata that you assign to a resource in the form of an arbitrary set of tags (key-value pairs). Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag a Systems Manager parameter to identify the type of resource to which it applies, the environment, or the type of configuration data referenced by the parameter.", "Tier": "The parameter tier.", - "Type": "The type of parameter.\n\n> Although `SecureString` is included in the list of valid values, AWS CloudFormation does *not* currently support creating a `SecureString` parameter type.", + "Type": "The type of parameter.", "Value": "The parameter value.\n\n> If type is `StringList` , the system returns a comma-separated string with no spaces between commas in the `Value` field." }, "AWS::SSM::PatchBaseline": { @@ -38122,7 +38263,7 @@ "BucketName": "The name of the S3 bucket where the aggregated data is stored.", "BucketPrefix": "An Amazon S3 prefix for the bucket.", "BucketRegion": "The AWS Region with the S3 bucket targeted by the resource data sync.", - "KMSKeyArn": "The ARN of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exist in the same region as the destination Amazon S3 bucket.", + "KMSKeyArn": "The Amazon Resource Name (ARN) of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exists in the same AWS Region as the destination Amazon S3 bucket.", "S3Destination": "Configuration information for the target S3 bucket.", "SyncFormat": "A supported sync format. The following format is currently supported: JsonSerDe", "SyncSource": "Information about the source where the data was synchronized.", @@ -38147,13 +38288,13 @@ }, "AWS::SSM::ResourcePolicy": { "Policy": "A policy you want to associate with a resource.", - "ResourceArn": "Amazon Resource Name (ARN) of the resource to which you want to attach a policy." + "ResourceArn": "The Amazon Resource Name (ARN) of the resource to which you want to attach a policy." }, "AWS::SSMContacts::Contact": { "Alias": "The unique and identifiable alias of the contact or escalation plan.", "DisplayName": "The full name of the contact or escalation plan.", "Plan": "A list of stages. A contact has an engagement plan with stages that contact specified contact channels. An escalation plan uses stages that contact specified contacts.", - "Type": "Refers to the type of contact:\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule." + "Type": "The type of contact.\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule." 
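As one concrete reading of the `AllowedPattern` , `Name` , and `Type` descriptions above, here is a minimal sketch of a numeric-only `String` parameter; the parameter name and value are arbitrary placeholders:

Resources:
  RetryCountParameter:
    Type: AWS::SSM::Parameter
    Properties:
      Name: /example/retry-count         # placeholder hierarchy and name
      Type: String
      Value: "3"
      AllowedPattern: '^\d+$'            # only all-digit values pass validation
      Description: Maximum retry count for the example application.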
}, "AWS::SSMContacts::Contact ChannelTargetInfo": { "ChannelId": "The Amazon Resource Name (ARN) of the contact channel.", @@ -38242,15 +38383,15 @@ "Tags": "A list of tags to add to the replication set." }, "AWS::SSMIncidents::ReplicationSet RegionConfiguration": { - "SseKmsKeyId": "The KMS key ID to use to encrypt your replication set." + "SseKmsKeyId": "The AWS Key Management Service key ID to use to encrypt your replication set." }, "AWS::SSMIncidents::ReplicationSet ReplicationRegion": { "RegionConfiguration": "Specifies the Region configuration.", "RegionName": "Specifies the region name to add to the replication set." }, "AWS::SSMIncidents::ReplicationSet Tag": { - "Key": "", - "Value": "" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::SSMIncidents::ResponsePlan": { "Actions": "The actions that the response plan starts at the beginning of an incident.", @@ -38266,7 +38407,7 @@ "SsmAutomation": "Details about the Systems Manager automation document that will be used as a runbook during an incident." }, "AWS::SSMIncidents::ResponsePlan ChatChannel": { - "ChatbotSns": "The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel by using the SNS topics" + "ChatbotSns": "The Amazon SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel by using the Amazon SNS topics" }, "AWS::SSMIncidents::ResponsePlan DynamicSsmParameter": { "Key": "The key parameter to use when running the Systems Manager Automation runbook.", @@ -38279,7 +38420,7 @@ "DedupeString": "Used to create only one incident record for an incident.", "Impact": "Defines the impact to the customers. Providing an impact overwrites the impact provided by a response plan.\n\n**Possible impacts:** - `1` - Critical impact, this typically relates to full application failure that impacts many to all customers.\n- `2` - High impact, partial application failure with impact to many customers.\n- `3` - Medium impact, the application is providing reduced service to customers.\n- `4` - Low impact, customer might aren't impacted by the problem yet.\n- `5` - No impact, customers aren't currently impacted but urgent action is needed to avoid impact.", "IncidentTags": "Tags to assign to the template. When the `StartIncident` API action is called, Incident Manager assigns the tags specified in the template to the incident.", - "NotificationTargets": "The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the SNS topics.", + "NotificationTargets": "The Amazon Simple Notification Service ( Amazon SNS ) targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the Amazon SNS topics.", "Summary": "The summary describes what has happened during the incident.", "Title": "The title of the incident is a brief and easily recognizable." }, @@ -38287,7 +38428,7 @@ "PagerDutyConfiguration": "Information about the PagerDuty service where the response plan creates an incident." }, "AWS::SSMIncidents::ResponsePlan NotificationTargetItem": { - "SnsTopicArn": "The Amazon Resource Name (ARN) of the SNS topic." + "SnsTopicArn": "The Amazon Resource Name (ARN) of the Amazon SNS topic." 
}, "AWS::SSMIncidents::ResponsePlan PagerDutyConfiguration": { "Name": "The name of the PagerDuty configuration.", @@ -38299,19 +38440,19 @@ }, "AWS::SSMIncidents::ResponsePlan SsmAutomation": { "DocumentName": "The automation document's name.", - "DocumentVersion": "The automation document's version to use when running.", + "DocumentVersion": "The version of the runbook to use when running.", "DynamicParameters": "The key-value pairs to resolve dynamic parameter values when processing a Systems Manager Automation runbook.", - "Parameters": "The key-value pair parameters to use when running the automation document.", + "Parameters": "The key-value pair parameters to use when running the runbook.", "RoleArn": "The Amazon Resource Name (ARN) of the role that the automation document will assume when running commands.", "TargetAccount": "The account that the automation document will be run in. This can be in either the management account or an application account." }, "AWS::SSMIncidents::ResponsePlan SsmParameter": { - "Key": "The key parameter to use when running the automation document.", - "Values": "The value parameter to use when running the automation document." + "Key": "The key parameter to use when running the Automation runbook.", + "Values": "The value parameter to use when running the Automation runbook." }, "AWS::SSMIncidents::ResponsePlan Tag": { - "Key": "", - "Value": "" + "Key": "The tag key.", + "Value": "The tag value." }, "AWS::SSO::Assignment": { "InstanceArn": "The ARN of the IAM Identity Center instance under which the operation will be executed. For more information about ARNs, see [Amazon Resource Names (ARNs) and AWS Service Namespaces](https://docs.aws.amazon.com//general/latest/gr/aws-arns-and-namespaces.html) in the *AWS General Reference* .", @@ -38834,7 +38975,7 @@ "TableName": "The name of the Glue table." }, "AWS::SageMaker::FeatureGroup FeatureDefinition": { - "FeatureName": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .", + "FeatureName": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "FeatureType": "The value type of a feature. Valid values are Integral, Fractional, or String." }, "AWS::SageMaker::FeatureGroup OfflineStoreConfig": { @@ -38846,7 +38987,8 @@ "AWS::SageMaker::FeatureGroup OnlineStoreConfig": { "EnableOnlineStore": "Turn `OnlineStore` off by specifying `False` for the `EnableOnlineStore` flag. Turn `OnlineStore` on by specifying `True` for the `EnableOnlineStore` flag.\n\nThe default value is `False` .", "SecurityConfig": "Use to specify KMS Key ID ( `KMSKeyId` ) for at-rest encryption of your `OnlineStore` .", - "StorageType": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval." 
+ "StorageType": "Option for different tiers of low latency storage for real-time data retrieval.\n\n- `Standard` : A managed low latency data store for feature groups.\n- `InMemory` : A managed data store for feature groups that supports very low latency retrieval.", + "TtlDuration": "Time to live duration, where the record is hard deleted after the expiration time is reached; `ExpiresAt` = `EventTime` + `TtlDuration` . For information on HardDelete, see the [DeleteRecord](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_feature_store_DeleteRecord.html) API in the Amazon SageMaker API Reference guide." }, "AWS::SageMaker::FeatureGroup OnlineStoreSecurityConfig": { "KmsKeyId": "The AWS Key Management Service (KMS) key ARN that SageMaker Feature Store uses to encrypt the Amazon S3 objects at rest using Amazon S3 server-side encryption.\n\nThe caller (either user or IAM role) of `CreateFeatureGroup` must have below permissions to the `OnlineStore` `KmsKeyId` :\n\n- `\"kms:Encrypt\"`\n- `\"kms:Decrypt\"`\n- `\"kms:DescribeKey\"`\n- `\"kms:CreateGrant\"`\n- `\"kms:RetireGrant\"`\n- `\"kms:ReEncryptFrom\"`\n- `\"kms:ReEncryptTo\"`\n- `\"kms:GenerateDataKey\"`\n- `\"kms:ListAliases\"`\n- `\"kms:ListGrants\"`\n- `\"kms:RevokeGrant\"`\n\nThe caller (either user or IAM role) to all DataPlane operations ( `PutRecord` , `GetRecord` , `DeleteRecord` ) must have the following permissions to the `KmsKeyId` :\n\n- `\"kms:Decrypt\"`" @@ -38864,6 +39006,10 @@ "ProvisionedWriteCapacityUnits": "For provisioned feature groups, this indicates the write throughput you are billed for and can consume without throttling.\n\nThis field is not applicable for on-demand feature groups.", "ThroughputMode": "The mode used for your feature group throughput: `ON_DEMAND` or `PROVISIONED` ." }, + "AWS::SageMaker::FeatureGroup TtlDuration": { + "Unit": "`TtlDuration` time unit.", + "Value": "`TtlDuration` time value." + }, "AWS::SageMaker::Image": { "ImageDescription": "The description of the image.", "ImageDisplayName": "The display name of the image.\n\n*Length Constraints* : Minimum length of 1. Maximum length of 128.\n\n*Pattern* : `^\\S(.*\\S)?$`", @@ -39498,7 +39644,7 @@ "DataInputConfig": "The input configuration object for the model." }, "AWS::SageMaker::ModelPackage ModelMetrics": { - "Bias": "Metrics that measure bais in a model.", + "Bias": "Metrics that measure bias in a model.", "Explainability": "Metrics that help explain a model.", "ModelDataQuality": "Metrics that measure the quality of the input data for a model.", "ModelQuality": "Metrics that measure the quality of a model." @@ -40062,7 +40208,7 @@ "WorkteamName": "The name of the work team." }, "AWS::SageMaker::Workteam CognitoMemberDefinition": { - "CognitoClientId": "An identifier for an application client. You must create the app client ID using Amazon Cognito.", + "CognitoClientId": "An identifier for an application client. You must create the app client ID using Amazon Cognito .", "CognitoUserGroup": "An identifier for a user group.", "CognitoUserPool": "An identifier for a user pool. The user pool must be in the same region as the service that you are calling." }, @@ -41666,7 +41812,6 @@ "LabelNameCondition": "A single label name condition. This is the fully qualified label name that a log record must contain in order to meet the condition. Fully qualified labels have a prefix, optional namespaces, and label name. The prefix identifies the rule group or web ACL context of the rule that added the label." 
}, "AWS::WAFv2::LoggingConfiguration FieldToMatch": { - "JsonBody": "Redact the request body JSON.", "Method": "Redact the indicated HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", "QueryString": "Redact the query string. This is the part of a URL that appears after a `?` character, if any.", "SingleHeader": "Redact a single header. Provide the name of the header to inspect, for example, `User-Agent` or `Referer` . This setting isn't case sensitive.\n\nExample JSON: `\"SingleHeader\": { \"Name\": \"haystack\" }`", @@ -41677,11 +41822,6 @@ "Conditions": "Match conditions for the filter.", "Requirement": "Logic to apply to the filtering conditions. You can specify that, in order to satisfy the filter, a log must match all conditions or must match at least one condition." }, - "AWS::WAFv2::LoggingConfiguration JsonBody": { - "InvalidFallbackBehavior": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", - "MatchPattern": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", - "MatchScope": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values." - }, "AWS::WAFv2::LoggingConfiguration LabelNameCondition": { "LabelName": "The label name that a log record must contain in order to meet the condition. This must be a fully qualified label name. Fully qualified labels have a prefix, optional namespaces, and label name. The prefix identifies the rule group or web ACL context of the rule that added the label." }, @@ -41689,10 +41829,6 @@ "DefaultBehavior": "Default handling for logs that don't match any of the specified filtering conditions.", "Filters": "The filters that you want to apply to the logs." }, - "AWS::WAFv2::LoggingConfiguration MatchPattern": { - "All": "Match all of the elements.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", - "IncludedPaths": "Match only the specified include paths.\n\nProvide the include paths using JSON Pointer syntax. 
For example, `\"IncludedPaths\": [\"/dogs/0/name\", \"/dogs/1/name\"]` . For information about this syntax, see the Internet Engineering Task Force (IETF) documentation [JavaScript Object Notation (JSON) Pointer](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6901) .\n\nYou must specify either this setting or the `All` setting, but not both.\n\n> Don't use this option to include all paths. Instead, use the `All` setting." - }, "AWS::WAFv2::LoggingConfiguration SingleHeader": { "Name": "The name of the query header to inspect." }, @@ -41938,7 +42074,7 @@ "LabelMatchStatement": "A rule statement to match against labels that have been added to the web request by rules that have already run in the web ACL.\n\nThe label match statement provides the label or namespace string to search for. The label string can represent a part or all of the fully qualified label name that had been added to the web request. Fully qualified labels have a prefix, optional namespaces, and label name. The prefix identifies the rule group or web ACL context of the rule that added the label. If you do not provide the fully qualified name in your label match string, AWS WAF performs the search for labels that were added in the same context as the label match statement.", "NotStatement": "A logical rule statement used to negate the results of another rule statement. You provide one `Statement` within the `NotStatement` .", "OrStatement": "A logical rule statement used to combine other rule statements with OR logic. You provide more than one `Statement` within the `OrStatement` .", - "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. 
The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. 
You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . 
If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", "SizeConstraintStatement": "A rule statement that compares a number of bytes against the size of a request component, using a comparison operator, such as greater than (>) or less than (<). For example, you can use a size constraint statement to look for query strings that are longer than 100 bytes.\n\nIf you configure AWS WAF to inspect the request body, AWS WAF inspects only the number of bytes of the body up to the limit for the web ACL. By default, for regional web ACLs, this limit is 8 KB (8,192 bytes) and for CloudFront web ACLs, this limit is 16 KB (16,384 bytes). For CloudFront web ACLs, you can increase the limit in the web ACL `AssociationConfig` , for additional fees. If you know that the request body for your web requests should never exceed the inspection limit, you could use a size constraint statement to block requests that have a larger request body size.\n\nIf you choose URI for the value of Part of the request to filter on, the slash (/) in the URI counts as one character. For example, the URI `/logo.jpg` is nine characters long.", @@ -42295,7 +42431,7 @@ "ManagedRuleGroupStatement": "A rule statement used to run the rules that are defined in a managed rule group. To use this, provide the vendor name and the name of the rule group in this statement. You can retrieve the required names through the API call `ListAvailableManagedRuleGroups` .\n\nYou cannot nest a `ManagedRuleGroupStatement` , for example for use inside a `NotStatement` or `OrStatement` . It can only be referenced as a top-level statement within a rule.\n\n> You are charged additional fees when you use the AWS WAF Bot Control managed rule group `AWSManagedRulesBotControlRuleSet` , the AWS WAF Fraud Control account takeover prevention (ATP) managed rule group `AWSManagedRulesATPRuleSet` , or the AWS WAF Fraud Control account creation fraud prevention (ACFP) managed rule group `AWSManagedRulesACFPRuleSet` . For more information, see [AWS WAF Pricing](https://docs.aws.amazon.com/waf/pricing/) .", "NotStatement": "A logical rule statement used to negate the results of another rule statement. You provide one `Statement` within the `NotStatement` .", "OrStatement": "A logical rule statement used to combine other rule statements with OR logic. You provide more than one `Statement` within the `OrStatement` .", - "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. 
The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. 
For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "RateBasedStatement": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . 
You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "RegexMatchStatement": "A rule statement used to search web request components for a match against a single regular expression.", "RegexPatternSetReferenceStatement": "A rule statement used to search web request components for matches with regular expressions. To use this, create a `RegexPatternSet` that specifies the expressions that you want to detect, then use the ARN of that set in this statement. A web request matches the pattern set rule statement if the request component matches any of the patterns in the set.\n\nEach regex pattern set rule statement references a regex pattern set. You create and maintain the set independent of your rules. This allows you to use the single set in multiple rules. When you update the referenced set, AWS WAF automatically updates all rules that reference it.", "RuleGroupReferenceStatement": "A rule statement used to run the rules that are defined in a `RuleGroup` . To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.\n\nYou cannot nest a `RuleGroupReferenceStatement` , for example for use inside a `NotStatement` or `OrStatement` . You cannot use a rule group reference statement inside another rule group. You can only reference a rule group as a top-level statement within a rule that you define in a web ACL.", diff --git a/schema_source/cloudformation.schema.json b/schema_source/cloudformation.schema.json index 7f76210be..c2258435f 100644 --- a/schema_source/cloudformation.schema.json +++ b/schema_source/cloudformation.schema.json @@ -1216,12 +1216,12 @@ "additionalProperties": false, "properties": { "Data": { - "markdownDescription": "The rules definition file for this namespace.", + "markdownDescription": "The rules file used in the namespace.\n\nFor more details about the rules file, see [Creating a rules file](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-ruler-rulesfile.html) in the *Amazon Managed Service for Prometheus User Guide* .", "title": "Data", "type": "string" }, "Name": { - "markdownDescription": "The name of the rule groups namespace. 
This property is required.", + "markdownDescription": "The name of the rule groups namespace.", "title": "Name", "type": "string" }, @@ -1229,12 +1229,12 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of key and value pairs for the workspace resources.", + "markdownDescription": "The list of tag keys and values that are associated with the rule groups namespace.", "title": "Tags", "type": "array" }, "Workspace": { - "markdownDescription": "The ARN of the workspace that contains this rule groups namespace.", + "markdownDescription": "An Amazon Managed Service for Prometheus workspace is a logical and isolated Prometheus server dedicated to ingesting, storing, and querying your Prometheus-compatible metrics.", "title": "Workspace", "type": "string" } @@ -1303,30 +1303,30 @@ "additionalProperties": false, "properties": { "AlertManagerDefinition": { - "markdownDescription": "The alert manager definition for the workspace, as a string. For more information, see [Alert manager and templating](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alert-manager.html) .", + "markdownDescription": "The alert manager definition, a YAML configuration for the alert manager in your Amazon Managed Service for Prometheus workspace.\n\nFor details about the alert manager definition, see [Creating an alert manager configuration files](https://docs.aws.amazon.com/prometheus/latest/userguide/AMP-alertmanager-config.html) in the *Amazon Managed Service for Prometheus User Guide* .\n\nThe following example shows part of a CloudFormation YAML file with an embedded alert manager definition (following the `- |-` ).\n\n`Workspace: Type: AWS::APS::Workspace .... Properties: .... AlertManagerDefinition: Fn::Sub: - |- alertmanager_config: | templates: - 'default_template' route: receiver: example-sns receivers: - name: example-sns sns_configs: - topic_arn: 'arn:aws:sns:${AWS::Region}:${AWS::AccountId}:${TopicName}' -`", "title": "AlertManagerDefinition", "type": "string" }, "Alias": { - "markdownDescription": "An alias that you assign to this workspace to help you identify it. It does not need to be unique.\n\nThe alias can be as many as 100 characters and can include any type of characters. Amazon Managed Service for Prometheus automatically strips any blank spaces from the beginning and end of the alias that you specify.", + "markdownDescription": "The alias that is assigned to this workspace to help identify it. It does not need to be unique.", "title": "Alias", "type": "string" }, "KmsKeyArn": { - "markdownDescription": "", + "markdownDescription": "(optional) The ARN for a customer managed AWS KMS key to use for encrypting data within your workspace. 
For more information about using your own key in your workspace, see [Encryption at rest](https://docs.aws.amazon.com/prometheus/latest/userguide/encryption-at-rest-Amazon-Service-Prometheus.html) in the *Amazon Managed Service for Prometheus User Guide* .", "title": "KmsKeyArn", "type": "string" }, "LoggingConfiguration": { "$ref": "#/definitions/AWS::APS::Workspace.LoggingConfiguration", - "markdownDescription": "The LoggingConfiguration attribute is used to set the logging configuration for the workspace.", + "markdownDescription": "Contains information about the logging configuration for the workspace.", "title": "LoggingConfiguration" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "A list of tag keys and values to associate with the workspace.", + "markdownDescription": "The list of tag keys and values that are associated with the workspace.", "title": "Tags", "type": "array" } @@ -1357,7 +1357,7 @@ "additionalProperties": false, "properties": { "LogGroupArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the CloudWatch log group the logs are emitted to.", + "markdownDescription": "The ARN of the CloudWatch log group to which the vended log data will be published. This log group must exist prior to calling this operation.", "title": "LogGroupArn", "type": "string" } @@ -2319,7 +2319,7 @@ "additionalProperties": false, "properties": { "AccessToken": { - "markdownDescription": "The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The personal access token for a GitHub repository for an Amplify app. The personal access token is used to authorize access to a GitHub repository using the Amplify GitHub App. The token is not stored.\n\nUse `AccessToken` for GitHub repositories only. To authorize access to a repository provider such as Bitbucket or CodeCommit, use `OauthToken` .\n\nYou must specify either `AccessToken` or `OauthToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "title": "AccessToken", "type": "string" }, @@ -2334,12 +2334,12 @@ "title": "BasicAuthConfig" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. 
Maximum length of 25000.\n\n*Pattern:* (?s).+", + "markdownDescription": "The build specification (build spec) for an Amplify app.", "title": "BuildSpec", "type": "string" }, "CustomHeaders": { - "markdownDescription": "The custom HTTP headers for an Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 25000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The custom HTTP headers for an Amplify app.", "title": "CustomHeaders", "type": "string" }, @@ -2352,7 +2352,7 @@ "type": "array" }, "Description": { - "markdownDescription": "The description for an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The description of the Amplify app.", "title": "Description", "type": "string" }, @@ -2370,17 +2370,17 @@ "type": "array" }, "IAMServiceRole": { - "markdownDescription": "The AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) of the Amplify app.\n\n*Length Constraints:* Minimum length of 0. Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "AWS Identity and Access Management ( IAM ) service role for the Amazon Resource Name (ARN) of the Amplify app.", "title": "IAMServiceRole", "type": "string" }, "Name": { - "markdownDescription": "The name for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", + "markdownDescription": "The name of the Amplify app.", "title": "Name", "type": "string" }, "OauthToken": { - "markdownDescription": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The OAuth token for a third-party source control system for an Amplify app. The OAuth token is used to create a webhook and a read-only deploy key using SSH cloning. The OAuth token is not stored.\n\nUse `OauthToken` for repository providers other than GitHub, such as Bitbucket or CodeCommit. To authorize access to GitHub as your repository provider, use `AccessToken` .\n\nYou must specify either `OauthToken` or `AccessToken` when you create a new app.\n\nExisting Amplify apps deployed from a GitHub repository using OAuth continue to work with CI/CD. However, we strongly recommend that you migrate these apps to use the GitHub App. 
For more information, see [Migrating an existing OAuth app to the Amplify GitHub App](https://docs.aws.amazon.com/amplify/latest/userguide/setting-up-GitHub-access.html#migrating-to-github-app-auth) in the *Amplify User Guide* .", "title": "OauthToken", "type": "string" }, @@ -2390,7 +2390,7 @@ "type": "string" }, "Repository": { - "markdownDescription": "The repository for an Amplify app.\n\n*Pattern:* (?s).*", + "markdownDescription": "The Git repository for the Amplify app.", "title": "Repository", "type": "string" }, @@ -2446,7 +2446,7 @@ "title": "BasicAuthConfig" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for the autocreated branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 25000.", + "markdownDescription": "The build specification (build spec) for the autocreated branch.", "title": "BuildSpec", "type": "string" }, @@ -2484,7 +2484,7 @@ "type": "string" }, "PullRequestEnvironmentName": { - "markdownDescription": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", + "markdownDescription": "If pull request previews are enabled, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "title": "PullRequestEnvironmentName", "type": "string" }, @@ -2505,12 +2505,12 @@ "type": "boolean" }, "Password": { - "markdownDescription": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The password for basic authorization.", "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The user name for basic authorization.", "title": "Username", "type": "string" } @@ -2521,22 +2521,22 @@ "additionalProperties": false, "properties": { "Condition": { - "markdownDescription": "The condition for a URL rewrite or redirect rule, such as a country code.\n\n*Length Constraints:* Minimum length of 0. 
Maximum length of 2048.\n\n*Pattern:* (?s).*", + "markdownDescription": "The condition for a URL rewrite or redirect rule, such as a country code.", "title": "Condition", "type": "string" }, "Source": { - "markdownDescription": "The source pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 2048.\n\n*Pattern:* (?s).+", + "markdownDescription": "The source pattern for a URL rewrite or redirect rule.", "title": "Source", "type": "string" }, "Status": { - "markdownDescription": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.\n\n*Length Constraints:* Minimum length of 3. Maximum length of 7.\n\n*Pattern:* .{3,7}", + "markdownDescription": "The status code for a URL rewrite or redirect rule.\n\n- **200** - Represents a 200 rewrite rule.\n- **301** - Represents a 301 (moved permanently) redirect rule. This and all future requests should be directed to the target URL.\n- **302** - Represents a 302 temporary redirect rule.\n- **404** - Represents a 404 redirect rule.\n- **404-200** - Represents a 404 rewrite rule.", "title": "Status", "type": "string" }, "Target": { - "markdownDescription": "The target pattern for a URL rewrite or redirect rule.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 2048.\n\n*Pattern:* (?s).+", + "markdownDescription": "The target pattern for a URL rewrite or redirect rule.", "title": "Target", "type": "string" } @@ -2551,12 +2551,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*", + "markdownDescription": "", "title": "Value", "type": "string" } @@ -2609,7 +2609,7 @@ }, "Backend": { "$ref": "#/definitions/AWS::Amplify::Branch.Backend", - "markdownDescription": "The backend environment for an Amplify app.", + "markdownDescription": "Specifies the backend for a `Branch` of an Amplify app.", "title": "Backend" }, "BasicAuthConfig": { @@ -2618,17 +2618,17 @@ "title": "BasicAuthConfig" }, "BranchName": { - "markdownDescription": "The name for the branch.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.\n\n*Pattern:* (?s).+", + "markdownDescription": "The name for the branch.", "title": "BranchName", "type": "string" }, "BuildSpec": { - "markdownDescription": "The build specification (build spec) for the branch.\n\n*Length Constraints:* Minimum length of 1. 
Maximum length of 25000.\n\n*Pattern:* (?s).+", + "markdownDescription": "The build specification (build spec) for the branch.", "title": "BuildSpec", "type": "string" }, "Description": { - "markdownDescription": "The description for the branch that is part of an Amplify app.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* (?s).*", + "markdownDescription": "The description for the branch that is part of an Amplify app.", "title": "Description", "type": "string" }, @@ -2661,12 +2661,12 @@ "type": "string" }, "PullRequestEnvironmentName": { - "markdownDescription": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .\n\n*Length Constraints:* Maximum length of 20.\n\n*Pattern:* (?s).*", + "markdownDescription": "If pull request previews are enabled for this branch, you can use this property to specify a dedicated backend environment for your previews. For example, you could specify an environment named `prod` , `test` , or `dev` that you initialized with the Amplify CLI and mapped to this branch.\n\nTo enable pull request previews, set the `EnablePullRequestPreview` property to `true` .\n\nIf you don't specify an environment, Amplify Hosting provides backend support for each preview by automatically provisioning a temporary backend environment. Amplify Hosting deletes this environment when the pull request is closed.\n\nFor more information about creating backend environments, see [Feature Branch Deployments and Team Workflows](https://docs.aws.amazon.com/amplify/latest/userguide/multi-environments.html) in the *AWS Amplify Hosting User Guide* .", "title": "PullRequestEnvironmentName", "type": "string" }, "Stage": { - "markdownDescription": "Describes the current stage for the branch.\n\n*Valid Values:* PRODUCTION | BETA | DEVELOPMENT | EXPERIMENTAL | PULL_REQUEST", + "markdownDescription": "Describes the current stage for the branch.", "title": "Stage", "type": "string" }, @@ -2726,12 +2726,12 @@ "type": "boolean" }, "Password": { - "markdownDescription": "The password for basic authorization.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 255.", + "markdownDescription": "The password for basic authorization.", "title": "Password", "type": "string" }, "Username": { - "markdownDescription": "The user name for basic authorization.\n\n*Length Constraints:* Minimum length of 1. 
Maximum length of 255.", + "markdownDescription": "", "title": "Username", "type": "string" } @@ -2746,12 +2746,12 @@ "additionalProperties": false, "properties": { "Name": { - "markdownDescription": "The environment variable name.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "The environment variable name.", "title": "Name", "type": "string" }, "Value": { - "markdownDescription": "The environment variable value.\n\n*Length Constraints:* Maximum length of 5500.\n\n*Pattern:* (?s).*", + "markdownDescription": "The environment variable value.", "title": "Value", "type": "string" } @@ -2798,7 +2798,7 @@ "additionalProperties": false, "properties": { "AppId": { - "markdownDescription": "The unique ID for an Amplify app.\n\n*Length Constraints:* Minimum length of 1. Maximum length of 20.\n\n*Pattern:* d[a-z0-9]+", + "markdownDescription": "The unique ID for an Amplify app.", "title": "AppId", "type": "string" }, @@ -2811,12 +2811,12 @@ "type": "array" }, "AutoSubDomainIAMRole": { - "markdownDescription": "The required AWS Identity and Access Management (IAM) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.\n\n*Length Constraints:* Maximum length of 1000.\n\n*Pattern:* ^$|^arn:aws:iam::\\d{12}:role.+", + "markdownDescription": "The required AWS Identity and Access Management (IAMlong) service role for the Amazon Resource Name (ARN) for automatically creating subdomains.", "title": "AutoSubDomainIAMRole", "type": "string" }, "DomainName": { - "markdownDescription": "The domain name for the domain association.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* ^(((?!-)[A-Za-z0-9-]{0,62}[A-Za-z0-9])\\.)+((?!-)[A-Za-z0-9-]{1,62}[A-Za-z0-9])(\\.)?$", + "markdownDescription": "The domain name for the domain association.", "title": "DomainName", "type": "string" }, @@ -2871,7 +2871,7 @@ "type": "string" }, "Prefix": { - "markdownDescription": "The prefix setting for the subdomain.\n\n*Length Constraints:* Maximum length of 255.\n\n*Pattern:* (?s).*", + "markdownDescription": "The prefix setting for the subdomain.", "title": "Prefix", "type": "string" } @@ -7124,7 +7124,7 @@ "type": "string" }, "IntegrationMethod": { - "markdownDescription": "Specifies the integration's HTTP method type.", + "markdownDescription": "Specifies the integration's HTTP method type. For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "title": "IntegrationMethod", "type": "string" }, @@ -7748,7 +7748,7 @@ "type": "string" }, "IntegrationMethod": { - "markdownDescription": "Specifies the integration's HTTP method type.", + "markdownDescription": "Specifies the integration's HTTP method type. For WebSocket APIs, if you use a Lambda integration, you must set the integration method to `POST` .", "title": "IntegrationMethod", "type": "string" }, @@ -17584,7 +17584,7 @@ "type": "string" }, "DisconnectTimeoutInSeconds": { - "markdownDescription": "The amount of time that a streaming session remains active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 360000.", + "markdownDescription": "The amount of time that a streaming session remains active after users disconnect. 
If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming instance.\n\nSpecify a value between 60 and 36000.", "title": "DisconnectTimeoutInSeconds", "type": "number" }, @@ -17614,7 +17614,7 @@ "type": "string" }, "IdleDisconnectTimeoutInSeconds": { - "markdownDescription": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 3600.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", + "markdownDescription": "The amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `DisconnectTimeoutInSeconds` time interval begins. Users are notified before they are disconnected due to inactivity. If they try to reconnect to the streaming session before the time interval specified in `DisconnectTimeoutInSeconds` elapses, they are connected to their previous session. Users are considered idle when they stop providing keyboard or mouse input during their streaming session. File uploads and downloads, audio in, audio out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in `IdleDisconnectTimeoutInSeconds` elapses, they are disconnected.\n\nTo prevent users from being disconnected due to inactivity, specify a value of 0. Otherwise, specify a value between 60 and 36000.\n\nIf you enable this feature, we recommend that you specify a value that corresponds exactly to a whole number of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to the nearest minute. For example, if you specify a value of 70, users are disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint between two different minutes, the value is rounded up. For example, if you specify a value of 90, users are disconnected after 2 minutes of inactivity.", "title": "IdleDisconnectTimeoutInSeconds", "type": "number" }, @@ -17639,7 +17639,7 @@ "type": "number" }, "MaxSessionsPerInstance": { - "markdownDescription": "The maximum number of user sessions on an instance. 
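Taken together, the two timeout properties above can be set on a fleet as in this sketch (fleet name, image, and instance type are hypothetical); both example values are whole numbers of minutes, so the rounding behavior described above does not kick in:

```yaml
# Sketch: fleet name, image, and instance type are hypothetical.
ExampleFleet:
  Type: AWS::AppStream::Fleet
  Properties:
    Name: example-fleet
    InstanceType: stream.standard.medium
    ImageName: example-image
    ComputeCapacity:
      DesiredInstances: 2
    DisconnectTimeoutInSeconds: 300      # 5 minutes, within the 60-36000 range
    IdleDisconnectTimeoutInSeconds: 600  # 10 minutes, a whole number of minutes
```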
This only applies to multi-session fleets.", + "markdownDescription": "The maximum number of user sessions on an instance. This is applicable only to multi-session fleets.", "title": "MaxSessionsPerInstance", "type": "number" }, @@ -17726,7 +17726,7 @@ "type": "number" }, "DesiredSessions": { - "markdownDescription": "The desired number of user sessions for a multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must set either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank.", + "markdownDescription": "The desired capacity, in terms of the number of user sessions, for the multi-session fleet. This is not allowed for single-session fleets.\n\nWhen you create a fleet, you must define either the DesiredSessions or DesiredInstances attribute, based on the type of fleet you create. You can\u2019t define both attributes or leave both attributes blank.", "title": "DesiredSessions", "type": "number" } @@ -18215,7 +18215,7 @@ "type": "string" }, "MaximumLength": { - "markdownDescription": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` . This can't be specified when the permission is `DISABLED` .\n\nThis can only be specified for AlwaysOn and OnDemand fleets. The attribute is not supported on Elastic fleets.\n\nThe value can be between 1 and 20,971,520 (20 MB).", + "markdownDescription": "Specifies the number of characters that can be copied by end users from the local device to the remote session, and to the local device from the remote session.\n\nThis can be specified only for the `CLIPBOARD_COPY_FROM_LOCAL_DEVICE` and `CLIPBOARD_COPY_TO_LOCAL_DEVICE` actions.\n\nThis defaults to 20,971,520 (20 MB) when unspecified and the permission is `ENABLED` . This can't be specified when the permission is `DISABLED` .\n\nThe value can be between 1 and 20,971,520 (20 MB).", "title": "MaximumLength", "type": "number" }, @@ -22455,7 +22455,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. 
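For the multi-session properties above (`MaxSessionsPerInstance` and `DesiredSessions`), a fleet fragment might look like this sketch; per the description, `DesiredSessions` is set instead of `DesiredInstances`, never both:

```yaml
# Sketch: a multi-session fleet sizes capacity in sessions, not instances.
MultiSessionFleet:
  Type: AWS::AppStream::Fleet
  Properties:
    Name: multi-session-fleet          # hypothetical
    InstanceType: stream.standard.large
    ImageName: example-image
    MaxSessionsPerInstance: 5
    ComputeCapacity:
      DesiredSessions: 20              # set this OR DesiredInstances, never both
```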
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nTo turn off price protection, specify a high value, such as `999999` .\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from either the lowest priced current generation instance types or, failing that, the lowest priced previous generation instance types that match your attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage.\n\nIf you set `DesiredCapacityType` to `vcpu` or `memory-mib` , the price protection threshold is based on the per-vCPU or per-memory price instead of the per instance price.\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 Auto Scaling will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` .", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -24725,8 +24725,6 @@ "type": "string" }, "ModifiedAt": { - "markdownDescription": "Returns a timestamp representing the date and time for the most recent change for the transformer object.", - "title": "ModifiedAt", "type": "string" }, "Name": { @@ -24990,7 +24988,7 @@ "type": "string" }, "ScheduleExpressionTimezone": { - "markdownDescription": "", + "markdownDescription": "This is the timezone in which the schedule expression is set. By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.", "title": "ScheduleExpressionTimezone", "type": "string" }, @@ -25524,7 +25522,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. The tag value is optional, but it cannot be an empty string. The structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` .", + "markdownDescription": "The tag key-value pair applied to those AWS resources that you want to trigger an evaluation for a rule. A maximum of one key-value pair can be provided. 
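Stepping back to the `SpotMaxPricePercentageOverLowestPrice` hunk above: within an Auto Scaling group's `InstanceRequirements` block, the threshold sits alongside the required vCPU and memory ranges, roughly as in this sketch (all numbers hypothetical):

```yaml
# Sketch: numbers are hypothetical; omit the property to accept the default behavior.
InstanceRequirements:
  VCpuCount: { Min: 2, Max: 8 }
  MemoryMiB: { Min: 4096 }
  SpotMaxPricePercentageOverLowestPrice: 100   # percent above the identified Spot price
```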
The tag value is optional, but it cannot be an empty string if you are creating or editing a framework from the console (though the value can be an empty string when included in a CloudFormation template).\n\nThe structure to assign a tag is: `[{\"Key\":\"string\",\"Value\":\"string\"}]` .", "title": "Tags", "type": "array" } @@ -25786,7 +25784,7 @@ "items": { "$ref": "#/definitions/Tag" }, - "markdownDescription": "", + "markdownDescription": "Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: `+ - = . _ : /.`", "title": "Tags", "type": "array" } @@ -26256,12 +26254,12 @@ "items": { "$ref": "#/definitions/AWS::Batch::ComputeEnvironment.Ec2ConfigurationObject" }, - "markdownDescription": "Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . To remove the EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "Provides information used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If `Ec2Configuration` isn't specified, the default is `ECS_AL2` .\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* . To remove the Amazon EC2 configuration and any custom AMI ID specified in `imageIdOverride` , set this value to an empty string.\n\nOne or two values can be provided.\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "title": "Ec2Configuration", "type": "array" }, "Ec2KeyPair": { - "markdownDescription": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the EC2 key pair requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.\n\nWhen updating a compute environment, changing the Amazon EC2 key pair requires an infrastructure update of the compute environment. 
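The `Ec2Configuration` and `Ec2KeyPair` behavior described here can be seen in a compute-environment fragment such as this sketch (the key pair name is hypothetical); setting either value to an empty string removes it, which triggers the infrastructure update mentioned above:

```yaml
# Fragment of AWS::Batch::ComputeEnvironment ComputeResources; key pair name hypothetical.
ComputeResources:
  Type: EC2
  MaxvCpus: 64
  Subnets: [subnet-0123456789abcdef0]
  Ec2Configuration:
    - ImageType: ECS_AL2          # the default image type noted above
  Ec2KeyPair: my-batch-key-pair   # set to "" to remove (infrastructure update)
```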
For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "title": "Ec2KeyPair", "type": "string" }, @@ -26307,7 +26305,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", + "markdownDescription": "The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the security groups from the compute resource.\n\nWhen updating a compute environment, changing the Amazon EC2 security groups requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .", "title": "SecurityGroupIds", "type": "array" }, @@ -26320,13 +26318,13 @@ "items": { "type": "string" }, - "markdownDescription": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. 
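For the Fargate case described above, where `SecurityGroupIds` is required and up to 16 `Subnets` are allowed, the relevant fragment of `ComputeResources` might read as follows (all IDs hypothetical):

```yaml
# Fragment: Fargate compute resources; IDs hypothetical.
ComputeResources:
  Type: FARGATE
  MaxvCpus: 32
  SecurityGroupIds:               # required for Fargate, up to 5 entries
    - sg-0123456789abcdef0
  Subnets:                        # up to 16 subnets for Fargate
    - subnet-0a1b2c3d4e5f67890
    - subnet-0f9e8d7c6b5a43210
```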
For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", + "markdownDescription": "The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see [VPCs and subnets](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) in the *Amazon VPC User Guide* .\n\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> AWS Batch on Amazon EC2 and AWS Batch on Amazon EKS support Local Zones. For more information, see [Local Zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-local-zones) in the *Amazon EC2 User Guide for Linux Instances* , [Amazon EKS and AWS Local Zones](https://docs.aws.amazon.com/eks/latest/userguide/local-zones.html) in the *Amazon EKS User Guide* and [Amazon ECS clusters in Local Zones, Wavelength Zones, and AWS Outposts](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-regions-zones.html#clusters-local-zones) in the *Amazon ECS Developer Guide* .\n> \n> AWS Batch on Fargate doesn't currently support Local Zones.", "title": "Subnets", "type": "array" }, "Tags": { "additionalProperties": true, - "markdownDescription": "Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value-for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", + "markdownDescription": "Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute environment. For AWS Batch , these take the form of `\"String1\": \"String2\"` , where `String1` is the tag key and `String2` is the tag value-for example, `{ \"Name\": \"Batch Instance - C4OnDemand\" }` . This is helpful for recognizing your Batch instances in the Amazon EC2 console. 
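The `{ "Name": "Batch Instance - C4OnDemand" }` example given in the description corresponds to this fragment; these tags land on the launched Amazon EC2 resources, not on the compute environment itself, and the second key is a hypothetical addition:

```yaml
# Fragment: tags applied to the launched Amazon EC2 resources; "Team" is hypothetical.
ComputeResources:
  Tags:
    Name: "Batch Instance - C4OnDemand"   # the example given in the description
    Team: data-platform
```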
These tags aren't seen when using the AWS Batch `ListTagsForResource` API operation.\n\nWhen updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see [Updating compute environments](https://docs.aws.amazon.com/batch/latest/userguide/updating-compute-environments.html) in the *AWS Batch User Guide* .\n\n> This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -26471,12 +26469,12 @@ "properties": { "ContainerProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.ContainerProperties", - "markdownDescription": "An object with various properties specific to Amazon ECS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", + "markdownDescription": "An object with properties specific to Amazon ECS-based jobs. When `containerProperties` is used in the job definition, it can't be used in addition to `eksProperties` , `ecsProperties` , or `nodeProperties` .", "title": "ContainerProperties" }, "EksProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.EksProperties", - "markdownDescription": "An object with various properties that are specific to Amazon EKS based jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.", + "markdownDescription": "An object with properties that are specific to Amazon EKS-based jobs. When `eksProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `nodeProperties` .", "title": "EksProperties" }, "JobDefinitionName": { @@ -26486,7 +26484,7 @@ }, "NodeProperties": { "$ref": "#/definitions/AWS::Batch::JobDefinition.NodeProperties", - "markdownDescription": "An object with various properties that are specific to multi-node parallel jobs. Valid values are `containerProperties` , `eksProperties` , and `nodeProperties` . Only one can be specified.\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", + "markdownDescription": "An object with properties that are specific to multi-node parallel jobs. When `nodeProperties` is used in the job definition, it can't be used in addition to `containerProperties` , `ecsProperties` , or `eksProperties` .\n\n> If the job runs on Fargate resources, don't specify `nodeProperties` . Use `containerProperties` instead.", "title": "NodeProperties" }, "Parameters": { @@ -26540,7 +26538,7 @@ "title": "Timeout" }, "Type": { - "markdownDescription": "The type of job definition. For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n> If the job is run on Fargate resources, then `multinode` isn't supported.", + "markdownDescription": "The type of job definition. 
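The mutual exclusivity described above means a `container`-type job definition carries only `ContainerProperties`; a minimal sketch, with the image and names hypothetical:

```yaml
# Sketch: a container-type job definition; image and names hypothetical.
ExampleJobDefinition:
  Type: AWS::Batch::JobDefinition
  Properties:
    Type: container                 # so ContainerProperties is used, nothing else
    JobDefinitionName: example-job
    ContainerProperties:
      Image: public.ecr.aws/amazonlinux/amazonlinux:latest
      Command: [echo, hello]
      ResourceRequirements:
        - { Type: VCPU, Value: "1" }
        - { Type: MEMORY, Value: "2048" }
```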
For more information about multi-node parallel jobs, see [Creating a multi-node parallel job definition](https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) in the *AWS Batch User Guide* .\n\n- If the value is `container` , then one of the following is required: `containerProperties` , `ecsProperties` , or `eksProperties` .\n- If the value is `multinode` , then `nodeProperties` is required.\n\n> If the job is run on Fargate resources, then `multinode` isn't supported.", "title": "Type", "type": "string" } @@ -26602,7 +26600,7 @@ }, "FargatePlatformConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.FargatePlatformConfiguration", - "markdownDescription": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "markdownDescription": "The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "title": "FargatePlatformConfiguration" }, "Image": { @@ -26627,11 +26625,11 @@ }, "LogConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.LogConfiguration", - "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the `LogConfiguration` data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. 
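As a sketch of the `LogConfiguration` block discussed here, assuming the `awslogs` driver and a log group that already exists (both names hypothetical):

```yaml
# Fragment of ContainerProperties; assumes the awslogs driver and an existing log group.
LogConfiguration:
  LogDriver: awslogs
  Options:
    awslogs-group: /aws/batch/example-job   # hypothetical log group name
    awslogs-stream-prefix: example
```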
For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", + "markdownDescription": "The log configuration specification for the container.\n\nThis parameter maps to `LogConfig` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--log-driver` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see [Configure logging drivers](https://docs.aws.amazon.com/https://docs.docker.com/engine/admin/logging/overview/) in the Docker documentation.\n\n> AWS Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the [LogConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties-logconfiguration.html) data type). \n\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: `sudo docker version | grep \"Server API version\"`\n\n> The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the `ECS_AVAILABLE_LOGGING_DRIVERS` environment variable before containers placed on that instance can use these log configuration options. For more information, see [Amazon ECS container agent configuration](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) in the *Amazon Elastic Container Service Developer Guide* .", "title": "LogConfiguration" }, "Memory": { - "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.", + "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. 
It must be specified for each node at least once.", "title": "Memory", "type": "number" }, @@ -26645,7 +26643,7 @@ }, "NetworkConfiguration": { "$ref": "#/definitions/AWS::Batch::JobDefinition.NetworkConfiguration", - "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.", + "markdownDescription": "The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.", "title": "NetworkConfiguration" }, "Privileged": { @@ -26693,7 +26691,7 @@ "type": "string" }, "Vcpus": { - "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", + "markdownDescription": "This parameter is deprecated, use `resourceRequirements` to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on Amazon EC2 resources, it specifies the number of vCPUs reserved for the job.\n\nEach vCPU is equivalent to 1,024 CPU shares. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.", "title": "Vcpus", "type": "number" }, @@ -26778,7 +26776,7 @@ "items": { "type": "string" }, - "markdownDescription": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. 
For more information, see [CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) in the *Dockerfile reference* and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", + "markdownDescription": "An array of arguments to the entrypoint. If this isn't specified, the `CMD` of the container image is used. This corresponds to the `args` member in the [Entrypoint](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint) portion of the [Pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/) in Kubernetes. Environment variable references are expanded using the container's environment.\n\nIf the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \" `$(NAME1)` \" and the `NAME1` environment variable doesn't exist, the command string will remain \" `$(NAME1)` .\" `$$` is replaced with `$` , and the resulting string isn't expanded. For example, `$$(VAR_NAME)` is passed as `$(VAR_NAME)` whether or not the `VAR_NAME` environment variable exists. For more information, see [Dockerfile reference: CMD](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/builder/#cmd) and [Define a command and arguments for a pod](https://docs.aws.amazon.com/https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/) in the *Kubernetes documentation* .", "title": "Args", "type": "array" }, @@ -27313,7 +27311,7 @@ "type": "string" }, "Value": { - "markdownDescription": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. 
This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . \n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. 
The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880", + "markdownDescription": "The quantity of the specified resource to reserve for the container. The values vary based on the `type` specified.\n\n- **type=\"GPU\"** - The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.\n\n> GPUs aren't available for jobs that are running on Fargate resources.\n- **type=\"MEMORY\"** - The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on Amazon EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to `Memory` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--memory` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) .\n\n> If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see [Memory management](https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) in the *AWS Batch User Guide* . 
\n\nFor jobs that are running on Fargate resources, then `value` is the hard limit (in MiB), and must match one of the supported values and the `VCPU` values must be one of the values supported for that memory value.\n\n- **value = 512** - `VCPU` = 0.25\n- **value = 1024** - `VCPU` = 0.25 or 0.5\n- **value = 2048** - `VCPU` = 0.25, 0.5, or 1\n- **value = 3072** - `VCPU` = 0.5, or 1\n- **value = 4096** - `VCPU` = 0.5, 1, or 2\n- **value = 5120, 6144, or 7168** - `VCPU` = 1 or 2\n- **value = 8192** - `VCPU` = 1, 2, or 4\n- **value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360** - `VCPU` = 2 or 4\n- **value = 16384** - `VCPU` = 2, 4, or 8\n- **value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720** - `VCPU` = 4\n- **value = 20480, 24576, or 28672** - `VCPU` = 4 or 8\n- **value = 36864, 45056, 53248, or 61440** - `VCPU` = 8\n- **value = 32768, 40960, 49152, or 57344** - `VCPU` = 8 or 16\n- **value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880** - `VCPU` = 16\n- **type=\"VCPU\"** - The number of vCPUs reserved for the container. This parameter maps to `CpuShares` in the [Create a container](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/#create-a-container) section of the [Docker Remote API](https://docs.aws.amazon.com/https://docs.docker.com/engine/api/v1.23/) and the `--cpu-shares` option to [docker run](https://docs.aws.amazon.com/https://docs.docker.com/engine/reference/run/) . Each vCPU is equivalent to 1,024 CPU shares. For Amazon EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.\n\nThe default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see [AWS Fargate quotas](https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate) in the *AWS General Reference* .\n\nFor jobs that are running on Fargate resources, then `value` must match one of the supported values and the `MEMORY` values must be one of the values supported for that `VCPU` value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16\n\n- **value = 0.25** - `MEMORY` = 512, 1024, or 2048\n- **value = 0.5** - `MEMORY` = 1024, 2048, 3072, or 4096\n- **value = 1** - `MEMORY` = 2048, 3072, 4096, 5120, 6144, 7168, or 8192\n- **value = 2** - `MEMORY` = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384\n- **value = 4** - `MEMORY` = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720\n- **value = 8** - `MEMORY` = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440\n- **value = 16** - `MEMORY` = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880", "title": "Value", "type": "string" } @@ -33829,7 +33827,7 @@ "type": "boolean" }, "HttpVersion": { - "markdownDescription": "(Optional) Specify the maximum HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). 
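Reading the pairing tables above: a Fargate job that asks for 0.5 vCPU may request 1024, 2048, 3072, or 4096 MiB, so one valid combination is, for example:

```yaml
# A valid Fargate pairing per the tables above: 0.5 vCPU allows 1024-4096 MiB.
ResourceRequirements:
  - { Type: VCPU, Value: "0.5" }
  - { Type: MEMORY, Value: "2048" }
```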
CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", + "markdownDescription": "(Optional) Specify the HTTP version(s) that you want viewers to use to communicate with CloudFront . The default value for new distributions is `http1.1` .\n\nFor viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must support Server Name Indication (SNI).\n\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name Indication (SNI). CloudFront supports HTTP/3 connection migration to allow the viewer to switch networks without losing connection. For more information about connection migration, see [Connection Migration](https://docs.aws.amazon.com/https://www.rfc-editor.org/rfc/rfc9000.html#name-connection-migration) at RFC 9000. For more information about supported TLSv1.3 ciphers, see [Supported protocols and ciphers between viewers and CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/secure-connections-supported-viewer-protocols-ciphers.html) .", "title": "HttpVersion", "type": "string" }, @@ -34285,7 +34283,7 @@ "additionalProperties": false, "properties": { "OriginAccessIdentity": { - "markdownDescription": "The CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront. The format of the value is:\n\norigin-access-identity/cloudfront/ *ID-of-origin-access-identity*\n\nwhere `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* .", + "markdownDescription": "> If you're using origin access control (OAC) instead of origin access identity, specify an empty `OriginAccessIdentity` element. For more information, see [Restricting access to an Amazon S3 origin](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-origin.html) in the *Amazon CloudFront Developer Guide* . \n\nThe CloudFront origin access identity to associate with the origin. Use an origin access identity to configure the origin so that viewers can *only* access objects in an Amazon S3 bucket through CloudFront. 
The format of the value is:\n\n`origin-access-identity/cloudfront/ID-of-origin-access-identity`\n\nThe `*ID-of-origin-access-identity*` is the value that CloudFront returned in the `ID` element when you created the origin access identity.\n\nIf you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3 URL, specify an empty `OriginAccessIdentity` element.\n\nTo delete the origin access identity from an existing distribution, update the distribution configuration and include an empty `OriginAccessIdentity` element.\n\nTo replace the origin access identity, update the distribution configuration and specify the new origin access identity.\n\nFor more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide* .", "title": "OriginAccessIdentity", "type": "string" } @@ -36248,7 +36246,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
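Putting the `eventCategory`, `resources.type`, and `resources.ARN` rules above together, an `AdvancedEventSelectors` fragment for `AWS::CloudTrail::Trail` that logs data events for a single bucket might look like this sketch (the bucket name is hypothetical; note the trailing slash called out above):

```yaml
# Sketch for AWS::CloudTrail::Trail; bucket name hypothetical.
AdvancedEventSelectors:
  - Name: Log S3 object data events for one bucket
    FieldSelectors:
      - Field: eventCategory
        Equals: [Data]
      - Field: resources.type
        Equals: ["AWS::S3::Object"]
      - Field: resources.ARN
        StartsWith: ["arn:aws:s3:::amzn-s3-demo-bucket/"]   # keep the trailing slash
```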
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36571,7 +36569,7 @@ "type": "array" }, "Field": { - "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
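Taken together, the `eventCategory` , `resources.type` , and `resources.ARN` fields described above are expressed through a trail's `AdvancedEventSelectors` property. The following is a minimal sketch only, not an example taken from the schema: the logical name, delivery bucket, and data-bucket ARN prefix are hypothetical placeholders.

```yaml
# Sketch: log S3 object-level data events for one (hypothetical) bucket.
# All FieldSelectors within a single selector must match (they AND together).
ExampleTrail:
  Type: AWS::CloudTrail::Trail
  Properties:
    IsLogging: true
    S3BucketName: example-cloudtrail-delivery-bucket  # hypothetical delivery bucket
    AdvancedEventSelectors:
      - Name: S3ObjectEventsForOneBucket
        FieldSelectors:
          - Field: eventCategory
            Equals:
              - Data
          - Field: resources.type
            Equals:
              - AWS::S3::Object
          - Field: resources.ARN
            StartsWith:
              - arn:aws:s3:::example-data-bucket/  # trailing slash is intentional
```

As the description recommends, `StartsWith` with the bucket ARN plus the intentional trailing slash captures every object in the bucket, which is why `Equals` is discouraged for whole-bucket logging.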
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals `AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen 
`resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", + "markdownDescription": "A field in a CloudTrail event record on which to filter events to be logged. For event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the field is used only for selecting events as filtering is not supported.\n\nFor CloudTrail management events, supported fields include `readOnly` , `eventCategory` , and `eventSource` .\n\nFor CloudTrail data events, supported fields include `readOnly` , `eventCategory` , `eventName` , `resources.type` , and `resources.ARN` .\n\nFor event data stores for CloudTrail Insights events, AWS Config configuration items, Audit Manager evidence, or events outside of AWS , the only supported field is `eventCategory` .\n\n- *`readOnly`* - Optional. Can be set to `Equals` a value of `true` or `false` . If you do not add this field, CloudTrail logs both `read` and `write` events. A value of `true` logs only `read` events. A value of `false` logs only `write` events.\n- *`eventSource`* - For filtering management events only. This can be set to `NotEquals` `kms.amazonaws.com` or `NotEquals` `rdsdata.amazonaws.com` .\n- *`eventName`* - Can use any operator. You can use it to \ufb01lter in or \ufb01lter out any data event logged to CloudTrail, such as `PutBucket` or `GetSnapshotBlock` . You can have multiple values for this \ufb01eld, separated by commas.\n- *`eventCategory`* - This is required and must be set to `Equals` .\n\n- For CloudTrail management events, the value must be `Management` .\n- For CloudTrail data events, the value must be `Data` .\n\nThe following are used only for event data stores:\n\n- For CloudTrail Insights events, the value must be `Insight` .\n- For AWS Config configuration items, the value must be `ConfigurationItem` .\n- For Audit Manager evidence, the value must be `Evidence` .\n- For non- AWS events, the value must be `ActivityAuditLog` .\n- *`resources.type`* - This \ufb01eld is required for CloudTrail data events. 
`resources.type` can only use the `Equals` operator, and the value can be one of the following:\n\n- `AWS::DynamoDB::Table`\n- `AWS::Lambda::Function`\n- `AWS::S3::Object`\n- `AWS::AppConfig::Configuration`\n- `AWS::B2BI::Transformer`\n- `AWS::Bedrock::AgentAlias`\n- `AWS::Bedrock::KnowledgeBase`\n- `AWS::Cassandra::Table`\n- `AWS::CloudFront::KeyValueStore`\n- `AWS::CloudTrail::Channel`\n- `AWS::CodeWhisperer::Customization`\n- `AWS::CodeWhisperer::Profile`\n- `AWS::Cognito::IdentityPool`\n- `AWS::DynamoDB::Stream`\n- `AWS::EC2::Snapshot`\n- `AWS::EMRWAL::Workspace`\n- `AWS::FinSpace::Environment`\n- `AWS::Glue::Table`\n- `AWS::GreengrassV2::ComponentVersion`\n- `AWS::GreengrassV2::Deployment`\n- `AWS::GuardDuty::Detector`\n- `AWS::IoT::Certificate`\n- `AWS::IoT::Thing`\n- `AWS::IoTSiteWise::Asset`\n- `AWS::IoTSiteWise::TimeSeries`\n- `AWS::IoTTwinMaker::Entity`\n- `AWS::IoTTwinMaker::Workspace`\n- `AWS::KendraRanking::ExecutionPlan`\n- `AWS::KinesisVideo::Stream`\n- `AWS::ManagedBlockchain::Network`\n- `AWS::ManagedBlockchain::Node`\n- `AWS::MedicalImaging::Datastore`\n- `AWS::NeptuneGraph::Graph`\n- `AWS::PCAConnectorAD::Connector`\n- `AWS::QBusiness::Application`\n- `AWS::QBusiness::DataSource`\n- `AWS::QBusiness::Index`\n- `AWS::QBusiness::WebExperience`\n- `AWS::RDS::DBCluster`\n- `AWS::S3::AccessPoint`\n- `AWS::S3ObjectLambda::AccessPoint`\n- `AWS::S3Outposts::Object`\n- `AWS::SageMaker::Endpoint`\n- `AWS::SageMaker::ExperimentTrialComponent`\n- `AWS::SageMaker::FeatureGroup`\n- `AWS::ServiceDiscovery::Namespace`\n- `AWS::ServiceDiscovery::Service`\n- `AWS::SCN::Instance`\n- `AWS::SNS::PlatformEndpoint`\n- `AWS::SNS::Topic`\n- `AWS::SWF::Domain`\n- `AWS::SQS::Queue`\n- `AWS::SSMMessages::ControlChannel`\n- `AWS::ThinClient::Device`\n- `AWS::ThinClient::Environment`\n- `AWS::Timestream::Database`\n- `AWS::Timestream::Table`\n- `AWS::VerifiedPermissions::PolicyStore`\n\nYou can have only one `resources.type` \ufb01eld per selector. To log data events on more than one resource type, add another selector.\n- *`resources.ARN`* - You can use any operator with `resources.ARN` , but if you use `Equals` or `NotEquals` , the value must exactly match the ARN of a valid resource of the type you've speci\ufb01ed in the template as the value of resources.type. For example, if resources.type equals `AWS::S3::Object` , the ARN must be in one of the following formats. To log all data events for all objects in a specific S3 bucket, use the `StartsWith` operator, and include only the bucket ARN as the matching value.\n\nThe trailing slash is intentional; do not exclude it. 
Replace the text between less than and greater than symbols (<>) with resource-specific information.\n\n- `arn::s3:::/`\n- `arn::s3::://`\n\nWhen resources.type equals `AWS::DynamoDB::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table/`\n\nWhen resources.type equals `AWS::Lambda::Function` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::lambda:::function:`\n\nWhen resources.type equals `AWS::AppConfig::Configuration` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::appconfig:::application//environment//configuration/`\n\nWhen resources.type equals `AWS::B2BI::Transformer` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::b2bi:::transformer/`\n\nWhen resources.type equals `AWS::Bedrock::AgentAlias` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::agent-alias//`\n\nWhen resources.type equals `AWS::Bedrock::KnowledgeBase` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::bedrock:::knowledge-base/`\n\nWhen resources.type equals `AWS::Cassandra::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cassandra:::/keyspace//table/`\n\nWhen resources.type equals `AWS::CloudFront::KeyValueStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudfront:::key-value-store/`\n\nWhen resources.type equals `AWS::CloudTrail::Channel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cloudtrail:::channel/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Customization` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::customization/`\n\nWhen resources.type equals `AWS::CodeWhisperer::Profile` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::codewhisperer:::profile/`\n\nWhen resources.type equals `AWS::Cognito::IdentityPool` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::cognito-identity:::identitypool/`\n\nWhen `resources.type` equals `AWS::DynamoDB::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::dynamodb:::table//stream/`\n\nWhen `resources.type` equals `AWS::EC2::Snapshot` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ec2:::snapshot/`\n\nWhen `resources.type` equals `AWS::EMRWAL::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::emrwal:::workspace/`\n\nWhen `resources.type` equals `AWS::FinSpace::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::finspace:::environment/`\n\nWhen `resources.type` equals `AWS::Glue::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::glue:::table//`\n\nWhen `resources.type` equals `AWS::GreengrassV2::ComponentVersion` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- 
`arn::greengrass:::components/`\n\nWhen `resources.type` equals `AWS::GreengrassV2::Deployment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::greengrass:::deployments/`\n\nWhen `resources.type` equals `AWS::GuardDuty::Detector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::guardduty:::detector/`\n\nWhen `resources.type` equals `AWS::IoT::Certificate` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::cert/`\n\nWhen `resources.type` equals `AWS::IoT::Thing` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iot:::thing/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::Asset` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::asset/`\n\nWhen `resources.type` equals `AWS::IoTSiteWise::TimeSeries` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iotsitewise:::timeseries/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Entity` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace//entity/`\n\nWhen `resources.type` equals `AWS::IoTTwinMaker::Workspace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::iottwinmaker:::workspace/`\n\nWhen `resources.type` equals `AWS::KendraRanking::ExecutionPlan` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kendra-ranking:::rescore-execution-plan/`\n\nWhen `resources.type` equals `AWS::KinesisVideo::Stream` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::kinesisvideo:::stream//`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Network` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::networks/`\n\nWhen `resources.type` equals `AWS::ManagedBlockchain::Node` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::managedblockchain:::nodes/`\n\nWhen `resources.type` equals `AWS::MedicalImaging::Datastore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::medical-imaging:::datastore/`\n\nWhen `resources.type` equals `AWS::NeptuneGraph::Graph` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::neptune-graph:::graph/`\n\nWhen `resources.type` equals `AWS::PCAConnectorAD::Connector` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::pca-connector-ad:::connector/`\n\nWhen `resources.type` equals `AWS::QBusiness::Application` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application/`\n\nWhen `resources.type` equals `AWS::QBusiness::DataSource` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index//data-source/`\n\nWhen `resources.type` equals `AWS::QBusiness::Index` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::qbusiness:::application//index/`\n\nWhen `resources.type` equals `AWS::QBusiness::WebExperience` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following
format:\n\n- `arn::qbusiness:::application//web-experience/`\n\nWhen `resources.type` equals `AWS::RDS::DBCluster` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::rds:::cluster/`\n\nWhen `resources.type` equals `AWS::S3::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in one of the following formats. To log events on all objects in an S3 access point, we recommend that you use only the access point ARN, don\u2019t include the object path, and use the `StartsWith` or `NotStartsWith` operators.\n\n- `arn::s3:::accesspoint/`\n- `arn::s3:::accesspoint//object/`\n\nWhen `resources.type` equals `AWS::S3ObjectLambda::AccessPoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-object-lambda:::accesspoint/`\n\nWhen `resources.type` equals `AWS::S3Outposts::Object` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::s3-outposts:::`\n\nWhen `resources.type` equals `AWS::SageMaker::Endpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::endpoint/`\n\nWhen `resources.type` equals `AWS::SageMaker::ExperimentTrialComponent` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::experiment-trial-component/`\n\nWhen `resources.type` equals `AWS::SageMaker::FeatureGroup` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sagemaker:::feature-group/`\n\nWhen `resources.type` equals `AWS::SCN::Instance` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::scn:::instance/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Namespace` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::namespace/`\n\nWhen `resources.type` equals `AWS::ServiceDiscovery::Service` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::servicediscovery:::service/`\n\nWhen `resources.type` equals `AWS::SNS::PlatformEndpoint` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::endpoint///`\n\nWhen `resources.type` equals `AWS::SNS::Topic` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sns:::`\n\nWhen `resources.type` equals `AWS::SWF::Domain` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::swf:::domain/`\n\nWhen `resources.type` equals `AWS::SQS::Queue` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::sqs:::`\n\nWhen `resources.type` equals `AWS::SSMMessages::ControlChannel` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::ssmmessages:::control-channel/`\n\nWhen `resources.type` equals `AWS::ThinClient::Device` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::device/`\n\nWhen `resources.type` equals `AWS::ThinClient::Environment` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::thinclient:::environment/`\n\nWhen `resources.type` equals 
`AWS::Timestream::Database` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database/`\n\nWhen `resources.type` equals `AWS::Timestream::Table` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::timestream:::database//table/`\n\nWhen resources.type equals `AWS::VerifiedPermissions::PolicyStore` , and the operator is set to `Equals` or `NotEquals` , the ARN must be in the following format:\n\n- `arn::verifiedpermissions:::policy-store/`", "title": "Field", "type": "string" }, @@ -36737,7 +36735,7 @@ "type": "string" }, "ComparisonOperator": { - "markdownDescription": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.\n\nYou can specify the following values: `GreaterThanThreshold` , `GreaterThanOrEqualToThreshold` , `LessThanThreshold` , or `LessThanOrEqualToThreshold` .", + "markdownDescription": "The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.", "title": "ComparisonOperator", "type": "string" }, @@ -38687,7 +38685,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of webhook filter. There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- **EVENT** - A webhook event triggers a build when the provided `pattern` matches one of five event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` works with GitHub and GitHub Enterprise only.\n- **ACTOR_ACCOUNT_ID** - A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- **HEAD_REF** - A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- **BASE_REF** - A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- **FILE_PATH** - A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- **COMMIT_MESSAGE** - A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", + "markdownDescription": "The type of webhook filter. 
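The webhook filter types described next are configured through a CodeBuild project's `Triggers` property. The following is a minimal sketch under stated assumptions, not an example from the schema: the service role ARN, repository URL, and patterns are hypothetical placeholders. Filters inside one `FilterGroups` group must all match; separate groups are evaluated independently.

```yaml
# Sketch: build on pushes and newly created pull requests that target main.
ExampleProject:
  Type: AWS::CodeBuild::Project
  Properties:
    ServiceRole: arn:aws:iam::123456789012:role/example-codebuild-role  # hypothetical
    Artifacts:
      Type: NO_ARTIFACTS
    Environment:
      ComputeType: BUILD_GENERAL1_SMALL
      Image: aws/codebuild/standard:7.0
      Type: LINUX_CONTAINER
    Source:
      Type: GITHUB
      Location: https://github.com/example-org/example-repo.git  # hypothetical
    Triggers:
      Webhook: true
      FilterGroups:
        - - Type: EVENT
            Pattern: PUSH, PULL_REQUEST_CREATED
          - Type: BASE_REF
            Pattern: ^refs/heads/main$
```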
There are six webhook filter types: `EVENT` , `ACTOR_ACCOUNT_ID` , `HEAD_REF` , `BASE_REF` , `FILE_PATH` , and `COMMIT_MESSAGE` .\n\n- EVENT\n\n- A webhook event triggers a build when the provided `pattern` matches one of six event types: `PUSH` , `PULL_REQUEST_CREATED` , `PULL_REQUEST_UPDATED` , `PULL_REQUEST_CLOSED` , `PULL_REQUEST_REOPENED` , and `PULL_REQUEST_MERGED` . The `EVENT` patterns are specified as a comma-separated string. For example, `PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED` filters all push, pull request created, and pull request updated events.\n\n> The `PULL_REQUEST_REOPENED` event type works with GitHub and GitHub Enterprise only.\n- ACTOR_ACCOUNT_ID\n\n- A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression `pattern` .\n- HEAD_REF\n\n- A webhook event triggers a build when the head reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` and `refs/tags/tag-name` .\n\nWorks with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events.\n- BASE_REF\n\n- A webhook event triggers a build when the base reference matches the regular expression `pattern` . For example, `refs/heads/branch-name` .\n\n> Works with pull request events only.\n- FILE_PATH\n\n- A webhook triggers a build when the path of a changed file matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.\n- COMMIT_MESSAGE\n\n- A webhook triggers a build when the head commit message matches the regular expression `pattern` .\n\n> Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.", "title": "Type", "type": "string" } @@ -42270,7 +42268,7 @@ "type": "array" }, "DeletionProtection": { - "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user\npool. Before you can delete a user pool that you have protected against deletion, you\nmust deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", + "markdownDescription": "When active, `DeletionProtection` prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature.\n\nWhen you try to delete a protected user pool in a `DeleteUserPool` API request, Amazon Cognito returns an `InvalidParameterException` error. To delete a protected user pool, send a new `DeleteUserPool` request after you deactivate deletion protection in an `UpdateUserPool` API request.", "title": "DeletionProtection", "type": "string" }, @@ -42622,7 +42620,7 @@ "additionalProperties": false, "properties": { "MaxValue": { - "markdownDescription": "The maximum value of an attribute that is of the number data type.", + "markdownDescription": "The maximum length of a number attribute value.
Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "title": "MaxValue", "type": "string" }, @@ -42779,7 +42777,7 @@ "additionalProperties": false, "properties": { "MaxLength": { - "markdownDescription": "The maximum length.", + "markdownDescription": "The maximum length of a string attribute value. Must be a number less than or equal to `2^1023` , represented as a string with a length of 131072 characters or fewer.", "title": "MaxLength", "type": "string" }, @@ -43339,7 +43337,7 @@ "type": "array" }, "ProviderDetails": { - "markdownDescription": "The scopes, URLs, and identifiers for your external identity provider. The following\nexamples describe the provider detail keys for each IdP type. These values and their\nschema are subject to change. Social IdP `authorize_scopes` values must match\nthe values listed here.\n\n- **OpenID Connect (OIDC)** - Amazon Cognito accepts the following elements when it can't discover endpoint URLs from `oidc_issuer` : `attributes_url` , `authorize_url` , `jwks_uri` , `token_url` .\n\nCreate or update request: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_request_method\": \"GET\", \"attributes_url\": \"https://auth.example.com/userInfo\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"openid profile email\", \"authorize_url\": \"https://auth.example.com/authorize\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"jwks_uri\": \"https://auth.example.com/.well-known/jwks.json\", \"oidc_issuer\": \"https://auth.example.com\", \"token_url\": \"https://example.com/token\" }`\n- **SAML** - Create or update request with Metadata URL: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nCreate or update request with Metadata file: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"MetadataFile\": \"[metadata XML]\", \"RequestSigningAlgorithm\": \"rsa-sha256\" }`\n\nThe value of `MetadataFile` must be the plaintext metadata document with all quote (\") characters escaped by backslashes.\n\nDescribe response: `\"ProviderDetails\": { \"IDPInit\": \"true\", \"IDPSignout\": \"true\", \"EncryptedResponses\" : \"true\", \"ActiveEncryptionCertificate\": \"[certificate]\", \"MetadataURL\": \"https://auth.example.com/sso/saml/metadata\", \"RequestSigningAlgorithm\": \"rsa-sha256\", \"SLORedirectBindingURI\": \"https://auth.example.com/slo/saml\", \"SSORedirectBindingURI\": \"https://auth.example.com/sso/saml\" }`\n- **LoginWithAmazon** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"profile postal_code\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\"`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": 
\"https://api.amazon.com/user/profile\", \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"profile postal_code\", \"authorize_url\": \"https://www.amazon.com/ap/oa\", \"client_id\": \"amzn1.application-oa2-client.1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"POST\", \"token_url\": \"https://api.amazon.com/auth/o2/token\" }`\n- **Google** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email profile openid\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url\": \"https://people.googleapis.com/v1/people/me?personFields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"email profile openid\", \"authorize_url\": \"https://accounts.google.com/o/oauth2/v2/auth\", \"client_id\": \"1example23456789.apps.googleusercontent.com\", \"client_secret\": \"provider-app-client-secret\", \"oidc_issuer\": \"https://accounts.google.com\", \"token_request_method\": \"POST\", \"token_url\": \"https://www.googleapis.com/oauth2/v4/token\" }`\n- **SignInWithApple** - Create or update request: `\"ProviderDetails\": { \"authorize_scopes\": \"email name\", \"client_id\": \"com.example.cognito\", \"private_key\": \"1EXAMPLE\", \"key_id\": \"2EXAMPLE\", \"team_id\": \"3EXAMPLE\" }`\n\nDescribe response: `\"ProviderDetails\": { \"attributes_url_add_attributes\": \"false\", \"authorize_scopes\": \"email name\", \"authorize_url\": \"https://appleid.apple.com/auth/authorize\", \"client_id\": \"com.example.cognito\", \"key_id\": \"1EXAMPLE\", \"oidc_issuer\": \"https://appleid.apple.com\", \"team_id\": \"2EXAMPLE\", \"token_request_method\": \"POST\", \"token_url\": \"https://appleid.apple.com/auth/token\" }`\n- **Facebook** - Create or update request: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"authorize_scopes\": \"public_profile, email\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\" }`\n\nDescribe response: `\"ProviderDetails\": { \"api_version\": \"v17.0\", \"attributes_url\": \"https://graph.facebook.com/v17.0/me?fields=\", \"attributes_url_add_attributes\": \"true\", \"authorize_scopes\": \"public_profile, email\", \"authorize_url\": \"https://www.facebook.com/v17.0/dialog/oauth\", \"client_id\": \"1example23456789\", \"client_secret\": \"provider-app-client-secret\", \"token_request_method\": \"GET\", \"token_url\": \"https://graph.facebook.com/v17.0/oauth/access_token\" }`", + "markdownDescription": "The IdP details. The following list describes the provider detail keys for each IdP type.\n\n- For Google and Login with Amazon:\n\n- client_id\n- client_secret\n- authorize_scopes\n- For Facebook:\n\n- client_id\n- client_secret\n- authorize_scopes\n- api_version\n- For Sign in with Apple:\n\n- client_id\n- team_id\n- key_id\n- private_key\n- authorize_scopes\n- For OpenID Connect (OIDC) providers:\n\n- client_id\n- client_secret\n- attributes_request_method\n- oidc_issuer\n- authorize_scopes\n- The following keys are only present if Amazon Cognito didn't discover them at the `oidc_issuer` URL.\n\n- authorize_url\n- token_url\n- attributes_url\n- jwks_uri\n- Amazon Cognito sets the value of the following keys automatically. 
They are read-only.\n\n- attributes_url_add_attributes\n- For SAML providers:\n\n- MetadataFile or MetadataURL\n- IDPSignout *optional*", "title": "ProviderDetails", "type": "object" }, @@ -45356,7 +45354,7 @@ "additionalProperties": false, "properties": { "DocumentName": { - "markdownDescription": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, AWS Config checks only your account and AWS Region for the SSM document. If you want to use an SSM document from another Region or account, you must provide the ARN.", + "markdownDescription": "The name or Amazon Resource Name (ARN) of the SSM document to use to create a conformance pack. If you use the document name, AWS Config checks only your account and AWS Region for the SSM document.", "title": "DocumentName", "type": "string" }, @@ -49217,7 +49215,7 @@ "additionalProperties": false, "properties": { "AfterContactWorkTimeLimit": { - "markdownDescription": "The After Call Work (ACW) timeout setting, in seconds.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", + "markdownDescription": "The After Call Work (ACW) timeout setting, in seconds. This parameter has a minimum value of 0 and a maximum value of 2,000,000 seconds (24 days). Enter 0 if you don't want to allocate a specific amount of ACW time. It essentially means an indefinite amount of time. When the conversation ends, ACW starts; the agent must choose Close contact to end ACW.\n\n> When returned by a `SearchUsers` call, `AfterContactWorkTimeLimit` is returned in milliseconds.", "title": "AfterContactWorkTimeLimit", "type": "number" }, @@ -49774,7 +49772,7 @@ "additionalProperties": false, "properties": { "ControlIdentifier": { - "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *landing zone Region deny* control. For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com//controltower/latest/APIReference/Welcome.html) .", + "markdownDescription": "The ARN of the control. Only *Strongly recommended* and *Elective* controls are permitted, with the exception of the *Region deny* control. 
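The `ProviderDetails` hunk above only lists the create/update keys per IdP type. As a minimal sketch (not part of the schema diff), the Google keys from that list might be wired into an `AWS::Cognito::UserPoolIdentityProvider` resource as follows; the user pool reference and all credential values are placeholders taken from the description's own examples.

```yaml
# Illustrative sketch only -- not part of the schema diff.
# UserPool is assumed to be defined elsewhere in the template.
Resources:
  GoogleIdP:
    Type: AWS::Cognito::UserPoolIdentityProvider
    Properties:
      UserPoolId: !Ref UserPool
      ProviderName: Google
      ProviderType: Google
      ProviderDetails:
        client_id: 1example23456789.apps.googleusercontent.com
        client_secret: provider-app-client-secret
        authorize_scopes: email profile openid
      AttributeMapping:
        email: email
```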
For information on how to find the `controlIdentifier` , see [the overview page](https://docs.aws.amazon.com/controltower/latest/APIReference/Welcome.html) .", "title": "ControlIdentifier", "type": "string" }, @@ -61409,7 +61407,7 @@ "type": "array" }, "ProjectionType": { - "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.", + "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default.", "title": "ProjectionType", "type": "string" } @@ -62008,7 +62006,7 @@ "type": "array" }, "ProjectionType": { - "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.", + "markdownDescription": "The set of attributes that are projected into the index:\n\n- `KEYS_ONLY` - Only the index and primary keys are projected into the index.\n- `INCLUDE` - In addition to the attributes described in `KEYS_ONLY` , the secondary index will include other non-key attributes that you specify.\n- `ALL` - All of the table attributes are projected into the index.\n\nWhen using the DynamoDB console, `ALL` is selected by default.", "title": "ProjectionType", "type": "string" } @@ -63529,7 +63527,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.", + "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -63688,7 +63686,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes.
When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -66789,7 +66787,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. 
If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . \n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71278,7 +71276,7 @@ "type": "boolean" }, "SpotMaxPricePercentageOverLowestPrice": { - "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. 
The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nTo indicate no price protection threshold, specify a high value, such as `999999` .\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, then `SpotMaxPricePercentageOverLowestPrice` is used and the value for that parameter defaults to `100` . \n\nDefault: `100`", + "markdownDescription": "[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.\n\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.\n\nIf you set `TargetCapacityUnitType` to `vcpu` or `memory-mib` , the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.\n\nThis parameter is not supported for [GetSpotPlacementScores](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetSpotPlacementScores.html) and [GetInstanceTypesFromInstanceRequirements](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetInstanceTypesFromInstanceRequirements.html) .\n\n> Only one of `SpotMaxPricePercentageOverLowestPrice` or `MaxSpotPriceAsPercentageOfOptimalOnDemandPrice` can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as `999999` . 
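Because the `SpotMaxPricePercentageOverLowestPrice` hunks above recur across several `InstanceRequirements` definitions, a brief sketch may help; this is a hypothetical launch template fragment using attribute-based instance type selection, with the documented default threshold written out explicitly. All numbers are examples, not recommendations.

```yaml
# Illustrative sketch only -- a launch template using the
# InstanceRequirements properties discussed above.
SpotLaunchTemplate:
  Type: AWS::EC2::LaunchTemplate
  Properties:
    LaunchTemplateData:
      InstanceRequirements:
        VCpuCount:
          Min: 2
          Max: 8
        MemoryMiB:
          Min: 4096
        # Documented default; a very high value such as 999999
        # effectively disables Spot price protection.
        SpotMaxPricePercentageOverLowestPrice: 100
```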
\n\nDefault: `100`", "title": "SpotMaxPricePercentageOverLowestPrice", "type": "number" }, @@ -71348,7 +71346,7 @@ "type": "string" }, "WeightedCapacity": { - "markdownDescription": "The number of units provided by the specified instance type.", + "markdownDescription": "The number of units provided by the specified instance type.\n\n> When specifying weights, the price used in the `lowest-price` and `price-capacity-optimized` allocation strategies is per *unit* hour (where the instance price is divided by the specified weight). However, if all the specified weights are above the requested `TargetCapacity` , resulting in only 1 instance being launched, the price used is per *instance* hour.", "title": "WeightedCapacity", "type": "number" } @@ -83508,11 +83506,13 @@ }, "Properties": { "additionalProperties": true, + "markdownDescription": "", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Properties", "type": "object" } }, @@ -83861,7 +83861,7 @@ "type": "array" }, "PreferredMaintenanceWindow": { - "markdownDescription": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid values for `ddd` are:\n\nSpecifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", + "markdownDescription": "Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.\n\nValid values for `ddd` are:\n\n- `sun`\n- `mon`\n- `tue`\n- `wed`\n- `thu`\n- `fri`\n- `sat`\n\nExample: `sun:23:00-mon:01:30`", "title": "PreferredMaintenanceWindow", "type": "string" }, @@ -84877,7 +84877,7 @@ }, "Endpoint": { "$ref": "#/definitions/AWS::ElastiCache::ServerlessCache.Endpoint", - "markdownDescription": "Represents the information required for client programs to connect to a cache node.", + "markdownDescription": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "title": "Endpoint" }, "Engine": { @@ -84902,7 +84902,7 @@ }, "ReaderEndpoint": { "$ref": "#/definitions/AWS::ElastiCache::ServerlessCache.Endpoint", - "markdownDescription": "Represents the information required for client programs to connect to a cache node.", + "markdownDescription": "Represents the information required for client programs to connect to a cache node. This value is read-only.", "title": "ReaderEndpoint" }, "SecurityGroupIds": { @@ -85177,7 +85177,7 @@ }, "AuthenticationMode": { "$ref": "#/definitions/AWS::ElastiCache::User.AuthenticationMode", - "markdownDescription": "Specifies the authentication mode to use. Below is an example of the possible JSON values:\n\n```\n{ Type: Passwords: [\"*****\", \"******\"] // If Type is password.\n}\n```", + "markdownDescription": "Specifies the authentication mode to use. 
Below is an example of the possible JSON values:\n\n```\n{ Type: \"password\", Passwords: [\"*****\", \"******\"] // Passwords are required if Type is password.\n}\n```", "title": "AuthenticationMode" }, "Engine": { @@ -85983,7 +85983,7 @@ "items": { "type": "string" }, - "markdownDescription": "The Availability Zones for the load balancer. For load balancers in a VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", + "markdownDescription": "The Availability Zones for a load balancer in a default VPC. For a load balancer in a nondefault VPC, specify `Subnets` instead.\n\nUpdate requires replacement if you did not previously specify an Availability Zone or if you are removing all Availability Zones. Otherwise, update requires no interruption.", "title": "AvailabilityZones", "type": "array" }, @@ -86406,7 +86406,7 @@ "type": "string" }, "SslPolicy": { - "markdownDescription": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* .", + "markdownDescription": "[HTTPS and TLS listeners] The security policy that defines which protocols and ciphers are supported.\n\nUpdating the security policy can result in interruptions if the load balancer is handling a high volume of traffic.\n\nFor more information, see [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html#describe-ssl-policies) in the *Application Load Balancers Guide* and [Security policies](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) in the *Network Load Balancers Guide* .", "title": "SslPolicy", "type": "string" } @@ -92073,11 +92073,13 @@ }, "Parameters": { "additionalProperties": true, + "markdownDescription": "The parameters for the action.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Parameters", "type": "object" }, "StartAfter": { @@ -92090,11 +92092,13 @@ }, "Targets": { "additionalProperties": true, + "markdownDescription": "The targets for the action.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" } }, + "title": "Targets", "type": "object" } }, @@ -92124,7 +92128,7 @@ "properties": { "CloudWatchLogsConfiguration": { "$ref": "#/definitions/AWS::FIS::ExperimentTemplate.CloudWatchLogsConfiguration", - "markdownDescription": "The configuration for experiment logging to Amazon CloudWatch Logs.", + "markdownDescription": "The configuration for experiment logging to CloudWatch Logs.", "title": "CloudWatchLogsConfiguration" }, "LogSchemaVersion": { @@ -92134,7 +92138,7 @@ }, "S3Configuration": { "$ref": "#/definitions/AWS::FIS::ExperimentTemplate.S3Configuration", - "markdownDescription": "The configuration for experiment logging to Amazon S3.", + "markdownDescription": "The configuration for experiment logging to Amazon S3.", "title": "S3Configuration" } }, @@ -92175,7 +92179,7 @@ }, "Parameters": { "additionalProperties": true, - "markdownDescription": "The resource type parameters.", + "markdownDescription": "The
parameters for the resource type.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -93976,7 +93980,7 @@ "type": "string" }, "SecurityStyle": { - "markdownDescription": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/managing-volumes.html#volume-security-style) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` if the file system is managed by both UNIX and Windows administrators and users consist of both NFS and SMB clients.", + "markdownDescription": "Specifies the security style for the volume. If a volume's security style is not specified, it is automatically set to the root volume's security style. The security style determines the type of permissions that FSx for ONTAP uses to control data access. For more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the *Amazon FSx for NetApp ONTAP User Guide* . Specify one of the following values:\n\n- `UNIX` if the file system is managed by a UNIX administrator, the majority of users are NFS clients, and an application accessing the data uses a UNIX user as the service account.\n- `NTFS` if the file system is managed by a Windows administrator, the majority of users are SMB clients, and an application accessing the data uses a Windows user as the service account.\n- `MIXED` This is an advanced setting. For more information, see the topic [What the security styles and their effects are](https://docs.netapp.com/us-en/ontap/nfs-admin/security-styles-their-effects-concept.html) in the NetApp Documentation Center.\n\nFor more information, see [Volume security style](https://docs.aws.amazon.com/fsx/latest/ONTAPGuide/volume-security-style.html) in the FSx for ONTAP User Guide.", "title": "SecurityStyle", "type": "string" }, @@ -93986,7 +93990,7 @@ "type": "string" }, "SizeInMegabytes": { - "markdownDescription": "*This property has been deprecated. Use `SizeInBytes` .*\n\nSpecifies the size of the volume, in megabytes (MB), that you are creating.", + "markdownDescription": "Use `SizeInBytes` instead.
Specifies the size of the volume, in megabytes (MB), that you are creating.", "title": "SizeInMegabytes", "type": "string" }, @@ -97787,7 +97791,7 @@ "items": { "type": "string" }, - "markdownDescription": "", + "markdownDescription": "Indicates whether the CSV file contains custom data types.", "title": "ContainsCustomDatatype", "type": "array" }, @@ -97797,7 +97801,7 @@ "type": "string" }, "CustomDatatypeConfigured": { - "markdownDescription": "Enables the custom datatype to be configured.", + "markdownDescription": "Enables the configuration of custom data types.", "title": "CustomDatatypeConfigured", "type": "boolean" }, @@ -98262,7 +98266,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns used to exclude from the crawl. For more information, see [Catalog Tables with a Crawler](https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html) .", + "markdownDescription": "A list of glob patterns used to exclude from the crawl.", "title": "Exclusions", "type": "array" }, @@ -98275,7 +98279,7 @@ "items": { "type": "string" }, - "markdownDescription": "One or more Amazon S3 paths that contains Iceberg metadata folders as `s3://bucket/prefix` .", + "markdownDescription": "One or more Amazon S3 paths that contains Iceberg metadata folders as `s3://bucket/prefix` .", "title": "Paths", "type": "array" } @@ -98430,7 +98434,7 @@ "items": { "$ref": "#/definitions/AWS::Glue::Crawler.IcebergTarget" }, - "markdownDescription": "", + "markdownDescription": "Specifies Apache Iceberg data store targets.", "title": "IcebergTargets", "type": "array" }, @@ -98515,7 +98519,7 @@ "type": "string" }, "Tags": { - "markdownDescription": "", + "markdownDescription": "AWS tags that contain a key-value pair and may be searched by console, command line, or API.", "title": "Tags", "type": "object" } @@ -98865,7 +98869,7 @@ "type": "string" }, "Region": { - "markdownDescription": "Region of the target database.", + "markdownDescription": "The Region of the database.", "title": "Region", "type": "string" } @@ -100487,7 +100491,7 @@ }, "OpenTableFormatInput": { "$ref": "#/definitions/AWS::Glue::Table.OpenTableFormatInput", - "markdownDescription": "A structure representing an open format table.", + "markdownDescription": "Specifies an `OpenTableFormatInput` structure when creating an open format table.", "title": "OpenTableFormatInput" }, "TableInput": { @@ -100553,7 +100557,7 @@ "properties": { "MetadataOperation": { "$ref": "#/definitions/AWS::Glue::Table.MetadataOperation", - "markdownDescription": "A required metadata operation. Can only be set to `CREATE` .", + "markdownDescription": "A required metadata operation. Can only be set to `CREATE` .", "title": "MetadataOperation" }, "Version": { @@ -100789,7 +100793,7 @@ "type": "string" }, "Region": { - "markdownDescription": "Region of the target table.", + "markdownDescription": "The Region of the table.", "title": "Region", "type": "string" } @@ -105347,12 +105351,12 @@ "additionalProperties": false, "properties": { "ContactPostPassDurationSeconds": { - "markdownDescription": "Amount of time in seconds after a contact ends that you\u2019d like to receive a CloudWatch Event indicating the pass has finished.
For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)", + "markdownDescription": "Amount of time in seconds after a contact ends that you\u2019d like to receive a Ground Station Contact State Change Event indicating the pass has finished.", "title": "ContactPostPassDurationSeconds", "type": "number" }, "ContactPrePassDurationSeconds": { - "markdownDescription": "Amount of time in seconds prior to contact start that you'd like to receive a CloudWatch Event indicating an upcoming pass. For more information on CloudWatch Events, see the [What Is CloudWatch Events?](https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/WhatIsCloudWatchEvents.html)", + "markdownDescription": "Amount of time in seconds prior to contact start that you'd like to receive a Ground Station Contact State Change Event indicating an upcoming pass.", "title": "ContactPrePassDurationSeconds", "type": "number" }, @@ -106060,6 +106064,8 @@ "type": "string" }, "MasterId": { + "markdownDescription": "The AWS account ID of the account designated as the GuardDuty administrator account.", + "title": "MasterId", "type": "string" } }, @@ -106141,6 +106147,8 @@ "type": "string" }, "MemberId": { + "markdownDescription": "The AWS account ID of the account to designate as a member.", + "title": "MemberId", "type": "string" }, "Message": { @@ -109700,17 +109708,17 @@ "additionalProperties": false, "properties": { "ContainerRecipeArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the container recipe that is used for this pipeline.", + "markdownDescription": "The Amazon Resource Name (ARN) of the container recipe that defines how images are configured and tested.", "title": "ContainerRecipeArn", "type": "string" }, "DistributionConfigurationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the distribution configuration.", + "markdownDescription": "The Amazon Resource Name (ARN) of the distribution configuration that defines and configures the outputs of your pipeline.", "title": "DistributionConfigurationArn", "type": "string" }, "EnhancedImageMetadataEnabled": { - "markdownDescription": "Indicates whether Image Builder collects additional information about the image, such as the operating system (OS) version and package list.", + "markdownDescription": "Collects additional information about the image being created, including the operating system (OS) version and package list. This information is used to enhance the overall experience of using EC2 Image Builder.
Enabled by default.", "title": "EnhancedImageMetadataEnabled", "type": "boolean" }, @@ -109720,7 +109728,7 @@ "type": "string" }, "ImageRecipeArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the image recipe.", + "markdownDescription": "The Amazon Resource Name (ARN) of the image recipe that defines how images are configured, tested, and assessed.", "title": "ImageRecipeArn", "type": "string" }, @@ -109731,17 +109739,17 @@ }, "ImageTestsConfiguration": { "$ref": "#/definitions/AWS::ImageBuilder::Image.ImageTestsConfiguration", - "markdownDescription": "The configuration settings for your image test components, which includes a toggle that allows you to turn off tests, and a timeout setting.", + "markdownDescription": "The image tests configuration of the image.", "title": "ImageTestsConfiguration" }, "InfrastructureConfigurationArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the infrastructure configuration associated with this image pipeline.", + "markdownDescription": "The Amazon Resource Name (ARN) of the infrastructure configuration that defines the environment in which your image will be built and tested.", "title": "InfrastructureConfigurationArn", "type": "string" }, "Tags": { "additionalProperties": true, - "markdownDescription": "The tags that apply to this image.", + "markdownDescription": "The tags of the image.", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -109754,7 +109762,7 @@ "items": { "$ref": "#/definitions/AWS::ImageBuilder::Image.WorkflowConfiguration" }, - "markdownDescription": "Contains the build and test workflows that are associated with the image.", + "markdownDescription": "Contains an array of workflow configuration objects.", "title": "Workflows", "type": "array" } @@ -115486,7 +115494,7 @@ "additionalProperties": false, "properties": { "RuleName": { - "markdownDescription": "The name of the rule.", + "markdownDescription": "The name of the rule.\n\n*Pattern* : `[a-zA-Z0-9:_-]+`", "title": "RuleName", "type": "string" }, @@ -118730,7 +118738,7 @@ }, "Payload": { "$ref": "#/definitions/AWS::IoTEvents::AlarmModel.Payload", - "markdownDescription": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "markdownDescription": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "title": "Payload" }, "Separator": { @@ -119330,7 +119338,7 @@ }, "Payload": { "$ref": "#/definitions/AWS::IoTEvents::DetectorModel.Payload", - "markdownDescription": "You can configure the action payload when you send a message to an Amazon Kinesis Data Firehose delivery stream.", + "markdownDescription": "You can configure the action payload when you send a message to an Amazon Data Firehose delivery stream.", "title": "Payload" }, "Separator": { @@ -126547,7 +126555,7 @@ }, "S3Configuration": { "$ref": "#/definitions/AWS::Kendra::DataSource.S3DataSourceConfiguration", - "markdownDescription": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.", + "markdownDescription": "Provides the configuration information to connect to an Amazon S3 bucket as your data source.\n\n> Amazon Kendra now supports an upgraded Amazon S3 connector.\n> \n> You must now use the [TemplateConfiguration](https://docs.aws.amazon.com/kendra/latest/APIReference/API_TemplateConfiguration.html) object instead of the `S3DataSourceConfiguration` object to configure your connector.\n> \n> Connectors 
configured using the older console and API architecture will continue to function as configured. However, you won't be able to edit or update them. If you want to edit or update your connector configuration, you must create a new connector.\n> \n> We recommend migrating your connector workflow to the upgraded version. Support for connectors configured using the older architecture is scheduled to end by June 2024.", "title": "S3Configuration" }, "SalesforceConfiguration": { @@ -126988,7 +126996,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for file names and file types that should not be indexed. If a document that matches an inclusion prefix or inclusion pattern also matches an exclusion pattern, the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* will exclude all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** will exclude all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** will exclude all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to exclude from your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* excludes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** excludes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** excludes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "ExclusionPatterns", "type": "array" }, @@ -126996,7 +127004,7 @@ "items": { "type": "string" }, - "markdownDescription": "A list of glob patterns for documents that should be indexed.
If a document that matches an inclusion pattern also matches an exclusion pattern, the document is not indexed.\n\nSome [examples](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) are:\n\n- **.txt* will include all text files in a directory (files with the extension .txt).\n- ***/*.txt* will include all text files in a directory and its subdirectories.\n- **tax** will include all files in a directory that contain 'tax' in the file name, such as 'tax', 'taxes', 'income_tax'.", + "markdownDescription": "A list of glob patterns (patterns that can expand a wildcard pattern into a list of path names that match the given pattern) for certain file names and file types to include in your index. If a document matches both an inclusion and exclusion prefix or pattern, the exclusion prefix takes precedence and the document is not indexed. Examples of glob patterns include:\n\n- `/myapp/config/*` - All files inside config directory\n- `/**/*.png` - All .png files in all directories\n- `/**/*.{png,ico,md}` - All .png, .ico or .md files in all directories\n- `/myapp/src/**/*.ts` - All .ts files inside src directory (and all its subdirectories)\n- `**/!(*.module).ts` - All .ts files but not .module.ts\n- **.png , *.jpg* includes all PNG and JPEG image files in a directory (files with the extensions .png and .jpg).\n- **internal** includes all files in a directory that contain 'internal' in the file name, such as 'internal', 'internal_only', 'company_internal'.\n- ***/*internal** includes all internal-related files in a directory and its subdirectories.\n\nFor more examples, see [Use of Exclude and Include Filters](https://docs.aws.amazon.com/cli/latest/reference/s3/#use-of-exclude-and-include-filters) in the AWS CLI Command Reference.", "title": "InclusionPatterns", "type": "array" }, @@ -138573,7 +138581,7 @@ }, "PrivateRegistryAccess": { "$ref": "#/definitions/AWS::Lightsail::Container.PrivateRegistryAccess", - "markdownDescription": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", + "markdownDescription": "An object that describes the configuration for the container service to access private container image repositories, such as Amazon Elastic Container Registry ( Amazon ECR ) private repositories.\n\nFor more information, see [Configuring access to an Amazon ECR private repository for an Amazon Lightsail container service](https://docs.aws.amazon.com/lightsail/latest/userguide/amazon-lightsail-container-service-ecr-private-repo-access) in the *Amazon Lightsail Developer Guide* .", "title": "PrivateRegistryAccess" }, "PublicDomainNames": { @@ -140829,7 +140837,7 @@ "additionalProperties": false, "properties": { "PolicyDocument": { - "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifer` array and an `Operation` property with an `Audit` action. The `DataIdentifer` array lists the types of sensitive data that you want to mask.
For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms. This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifer` array and an `Operation` property with an `Deidentify` action. The `DataIdentifer` array must exactly match the `DataIdentifer` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifer` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.", + "markdownDescription": "Specify the policy, in JSON.\n\n*Data protection policy*\n\nA data protection policy must include two JSON blocks:\n\n- The first block must include both a `DataIdentifier` array and an `Operation` property with an `Audit` action. The `DataIdentifier` array lists the types of sensitive data that you want to mask. For more information about the available options, see [Types of data that you can mask](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/mask-sensitive-log-data-types.html) .\n\nThe `Operation` property with an `Audit` action is required to find the sensitive data terms.
This `Audit` action must contain a `FindingsDestination` object. You can optionally use that `FindingsDestination` object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist.\n- The second block must include both a `DataIdentifier` array and an `Operation` property with a `Deidentify` action. The `DataIdentifier` array must exactly match the `DataIdentifier` array in the first block of the policy.\n\nThe `Operation` property with the `Deidentify` action is what actually masks the data, and it must contain the `\"MaskConfig\": {}` object. The `\"MaskConfig\": {}` object must be empty.\n\n> The contents of the two `DataIdentifier` arrays must match exactly. \n\nIn addition to the two JSON blocks, the `policyDocument` can also include `Name` , `Description` , and `Version` fields. The `Name` is different than the operation's `policyName` parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch .\n\nThe JSON specified in `policyDocument` can be up to 30,720 characters long.\n\n*Subscription filter policy*\n\nA subscription filter policy can include the following attributes in a JSON block:\n\n- *DestinationArn* The ARN of the destination to deliver log events to. Supported destinations are:\n\n- A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery.\n- A Firehose data stream in the same account as the subscription policy, for same-account delivery.\n- A Lambda function in the same account as the subscription policy, for same-account delivery.\n- A logical destination in a different account created with [PutDestination](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestination.html) , for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations.\n- *RoleArn* The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery.\n- *FilterPattern* A filter pattern for subscribing to a filtered stream of log events.\n- *Distribution* The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to `Random` for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.", "title": "PolicyDocument", "type": "string" }, @@ -141004,7 +141012,7 @@ "type": "object" }, "DestinationResourceArn": { - "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.", + "markdownDescription": "The ARN of the AWS destination that this delivery destination represents. That AWS destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.", "title": "DestinationResourceArn", "type": "string" }, @@ -157733,28 +157741,42 @@ "additionalProperties": false, "properties": { "DeletionProtection": { + "markdownDescription": "A value that indicates whether the graph has deletion protection enabled.
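The `PolicyDocument` hunk above describes the two JSON blocks a data protection policy needs but gives no complete example. A minimal sketch, assuming the policy is attached account-wide through `AWS::Logs::AccountPolicy`, might look like this; the data identifier ARN and names are illustrative, and both blocks must list the same identifiers.

```yaml
# Illustrative sketch only -- not part of the schema diff.
AccountDataProtectionPolicy:
  Type: AWS::Logs::AccountPolicy
  Properties:
    PolicyName: account-data-protection
    PolicyType: DATA_PROTECTION_POLICY
    PolicyDocument: |
      {
        "Name": "account-data-protection",
        "Version": "2021-06-01",
        "Statement": [
          {
            "Sid": "audit",
            "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
            "Operation": { "Audit": { "FindingsDestination": {} } }
          },
          {
            "Sid": "redact",
            "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"],
            "Operation": { "Deidentify": { "MaskConfig": {} } }
          }
        ]
      }
```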
@@ -157733,28 +157741,42 @@ "additionalProperties": false, "properties": { "DeletionProtection": { + "markdownDescription": "A value that indicates whether the graph has deletion protection enabled. The graph can't be deleted when deletion protection is enabled.", + "title": "DeletionProtection", "type": "boolean" }, "GraphName": { + "markdownDescription": "The graph name. For example: `my-graph-1` .\n\nThe name must contain from 1 to 63 letters, numbers, or hyphens, and its first character must be a letter. It cannot end with a hyphen or contain two consecutive hyphens.\n\nIf you don't specify a graph name, a unique graph name is generated for you using the prefix `graph-for` , followed by a combination of `Stack Name` and a `UUID` .", + "title": "GraphName", "type": "string" }, "ProvisionedMemory": { + "markdownDescription": "The provisioned memory-optimized Neptune Capacity Units (m-NCUs) to use for the graph.\n\nMin = 128", + "title": "ProvisionedMemory", "type": "number" }, "PublicConnectivity": { + "markdownDescription": "Specifies whether the graph is reachable over the internet. All access to graphs is IAM authenticated.\n\nWhen the graph is publicly available, its domain name system (DNS) endpoint resolves to the public IP address from the internet. When the graph isn't publicly available, you need to create a `PrivateGraphEndpoint` in a given VPC to ensure the DNS name resolves to a private IP address that is reachable from the VPC.\n\nDefault: If not specified, the default value is false.\n\n> If enabling public connectivity for the first time, there will be a delay while it is enabled.", + "title": "PublicConnectivity", "type": "boolean" }, "ReplicaCount": { + "markdownDescription": "The number of replicas in other AZs.\n\nDefault: If not specified, the default value is 1.", + "title": "ReplicaCount", "type": "number" }, "Tags": { "items": { "$ref": "#/definitions/Tag" }, + "markdownDescription": "Adds metadata tags to the new graph. These tags can also be used with cost allocation reporting, or used in a Condition statement in an IAM policy.", + "title": "Tags", "type": "array" }, "VectorSearchConfiguration": { - "$ref": "#/definitions/AWS::NeptuneGraph::Graph.VectorSearchConfiguration" + "$ref": "#/definitions/AWS::NeptuneGraph::Graph.VectorSearchConfiguration", + "markdownDescription": "Specifies the number of dimensions for vector embeddings that will be loaded into the graph. The value is specified as `dimension=` value.
Max = 65,535", + "title": "VectorSearchConfiguration" } }, "required": [ @@ -157787,6 +157809,8 @@ "additionalProperties": false, "properties": { "VectorSearchDimension": { + "markdownDescription": "The number of dimensions.", + "title": "VectorSearchDimension", "type": "number" } }, @@ -157831,21 +157855,29 @@ "additionalProperties": false, "properties": { "GraphIdentifier": { + "markdownDescription": "The unique identifier of the Neptune Analytics graph.", + "title": "GraphIdentifier", "type": "string" }, "SecurityGroupIds": { "items": { "type": "string" }, + "markdownDescription": "Security groups to be attached to the private graph endpoint.", + "title": "SecurityGroupIds", "type": "array" }, "SubnetIds": { "items": { "type": "string" }, + "markdownDescription": "Subnets in which private graph endpoint ENIs are created.", + "title": "SubnetIds", "type": "array" }, "VpcId": { + "markdownDescription": "The VPC in which the private graph endpoint needs to be created.", + "title": "VpcId", "type": "string" } }, @@ -158417,7 +158449,7 @@ "properties": { "LogDestination": { "additionalProperties": true, - "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Kinesis Data Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", + "markdownDescription": "The named location for the logs, provided in a key:value mapping that is specific to the chosen destination type.\n\n- For an Amazon S3 bucket, provide the name of the bucket, with key `bucketName` , and optionally provide a prefix, with key `prefix` . The following example specifies an Amazon S3 bucket named `DOC-EXAMPLE-BUCKET` and the prefix `alerts` :\n\n`\"LogDestination\": { \"bucketName\": \"DOC-EXAMPLE-BUCKET\", \"prefix\": \"alerts\" }`\n- For a CloudWatch log group, provide the name of the CloudWatch log group, with key `logGroup` . The following example specifies a log group named `alert-log-group` :\n\n`\"LogDestination\": { \"logGroup\": \"alert-log-group\" }`\n- For a Firehose delivery stream, provide the name of the delivery stream, with key `deliveryStream` . The following example specifies a delivery stream named `alert-delivery-stream` :\n\n`\"LogDestination\": { \"deliveryStream\": \"alert-delivery-stream\" }`", "patternProperties": { "^[a-zA-Z0-9]+$": { "type": "string" @@ -158427,7 +158459,7 @@ "type": "object" }, "LogDestinationType": { - "markdownDescription": "The type of storage destination to send these logs to. You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Kinesis Data Firehose delivery stream.", + "markdownDescription": "The type of storage destination to send these logs to.
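As a rough illustration of how the `AWS::NeptuneGraph::Graph` and `AWS::NeptuneGraph::PrivateGraphEndpoint` properties documented above fit together, consider this hedged template fragment. The graph name, vector dimension, tag values, and VPC, subnet, and security group IDs are placeholders, and it assumes `Ref` on the graph resolves to its identifier:

```json
{
  "MyGraph": {
    "Type": "AWS::NeptuneGraph::Graph",
    "Properties": {
      "GraphName": "my-graph-1",
      "ProvisionedMemory": 128,
      "DeletionProtection": true,
      "PublicConnectivity": false,
      "ReplicaCount": 1,
      "VectorSearchConfiguration": { "VectorSearchDimension": 1536 },
      "Tags": [{ "Key": "team", "Value": "analytics" }]
    }
  },
  "MyEndpoint": {
    "Type": "AWS::NeptuneGraph::PrivateGraphEndpoint",
    "Properties": {
      "GraphIdentifier": { "Ref": "MyGraph" },
      "VpcId": "vpc-0abcd1234example",
      "SubnetIds": ["subnet-0abcd1234example"],
      "SecurityGroupIds": ["sg-0abcd1234example"]
    }
  }
}
```

The dimension value here stays well under the documented maximum of 65,535.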
You can send logs to an Amazon S3 bucket, a CloudWatch log group, or a Firehose delivery stream.", "title": "LogDestinationType", "type": "string" }, @@ -162965,7 +162997,7 @@ "type": "string" }, "StandbyReplicas": { - "markdownDescription": "Indicates whether standby replicas should be used for a collection.", + "markdownDescription": "Indicates whether to use standby replicas for the collection. You can't update this property after the collection is already created. If you attempt to modify this property, the collection continues to use the original value.", "title": "StandbyReplicas", "type": "string" }, @@ -170286,7 +170318,7 @@ "type": "string" }, "DestinationStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Kinesis Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Kinesis Data Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon Kinesis Data Stream or Amazon Data Firehose delivery stream that you want to publish event data to.\n\nFor a Kinesis Data Stream, the ARN format is: `arn:aws:kinesis: region : account-id :stream/ stream_name`\n\nFor a Firehose delivery stream, the ARN format is: `arn:aws:firehose: region : account-id :deliverystream/ stream_name`", "title": "DestinationStreamArn", "type": "string" }, @@ -172579,7 +172611,7 @@ "additionalProperties": false, "properties": { "DeliveryStreamArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.", + "markdownDescription": "The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.", "title": "DeliveryStreamArn", "type": "string" } @@ -172689,7 +172721,7 @@ }, "FirehoseLogDestination": { "$ref": "#/definitions/AWS::Pipes::Pipe.FirehoseLogDestination", - "markdownDescription": "The Amazon Kinesis Data Firehose logging configuration settings for the pipe.", + "markdownDescription": "The Amazon Data Firehose logging configuration settings for the pipe.", "title": "FirehoseLogDestination" }, "IncludeExecutionData": { @@ -215146,7 +215178,7 @@ "type": "number" }, "CACertificateIdentifier": { - "markdownDescription": "The identifier of the CA certificate for this DB instance.\n\nSpecifying or updating this property triggers a reboot. For more information about CA certificate identifiers for RDS DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon RDS User Guide* . 
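The `LogDestinationType` / `LogDestination` pairing described above might be wired into a firewall as in this hedged sketch; the firewall ARN and bucket name are placeholders, and `ALERT` is one of the documented log types:

```json
{
  "AlertLogging": {
    "Type": "AWS::NetworkFirewall::LoggingConfiguration",
    "Properties": {
      "FirewallArn": "arn:aws:network-firewall:us-east-1:111122223333:firewall/example-firewall",
      "LoggingConfiguration": {
        "LogDestinationConfigs": [
          {
            "LogType": "ALERT",
            "LogDestinationType": "S3",
            "LogDestination": { "bucketName": "DOC-EXAMPLE-BUCKET", "prefix": "alerts" }
          }
        ]
      }
    }
  }
}
```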
For more information about CA certificate identifiers for Aurora DB engines, see [Rotating Your SSL/TLS Certificate](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL-certificate-rotation.html) in the *Amazon Aurora User Guide* .", + "markdownDescription": "The identifier of the CA certificate for this DB instance.\n\nFor more information, see [Using SSL/TLS to encrypt a connection to a DB instance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html) in the *Amazon RDS User Guide* and [Using SSL/TLS to encrypt a connection to a DB cluster](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.SSL.html) in the *Amazon Aurora User Guide* .", "title": "CACertificateIdentifier", "type": "string" }, @@ -215181,12 +215213,12 @@ "type": "string" }, "DBClusterSnapshotIdentifier": { - "markdownDescription": "The identifier for the RDS for MySQL Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.\n- Can't be the identifier of an RDS for PostgreSQL Multi-AZ DB cluster snapshot.", + "markdownDescription": "The identifier for the Multi-AZ DB cluster snapshot to restore from.\n\nFor more information on Multi-AZ DB clusters, see [Multi-AZ DB cluster deployments](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html) in the *Amazon RDS User Guide* .\n\nConstraints:\n\n- Must match the identifier of an existing Multi-AZ DB cluster snapshot.\n- Can't be specified when `DBSnapshotIdentifier` is specified.\n- Must be specified when `DBSnapshotIdentifier` isn't specified.\n- If you are restoring from a shared manual Multi-AZ DB cluster snapshot, the `DBClusterSnapshotIdentifier` must be the ARN of the shared snapshot.\n- Can't be the identifier of an Aurora DB cluster snapshot.", "title": "DBClusterSnapshotIdentifier", "type": "string" }, "DBInstanceClass": { - "markdownDescription": "The compute and memory capacity of the DB instance, for example, `db.m4.large` . Not all DB instance classes are available in all AWS Regions, or for all database engines.\n\nFor the full list of DB instance classes, and availability for your engine, see [DB Instance Class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide.* For more information about DB instance class pricing and AWS Region support for DB instance classes, see [Amazon RDS Pricing](https://docs.aws.amazon.com/rds/pricing/) .", + "markdownDescription": "The compute and memory capacity of the DB instance, for example `db.m5.large` . Not all DB instance classes are available in all AWS Regions , or for all database engines. 
For the full list of DB instance classes, and availability for your engine, see [DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the *Amazon RDS User Guide* or [Aurora DB instance classes](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Concepts.DBInstanceClass.html) in the *Amazon Aurora User Guide* .", "title": "DBInstanceClass", "type": "string" }, @@ -229900,7 +229932,7 @@ "type": "object" }, "SubscriptionRoleArn": { - "markdownDescription": "This property applies only to Amazon Kinesis Data Firehose delivery stream subscriptions. Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Kinesis Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Kinesis Data Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Kinesis Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", + "markdownDescription": "This property applies only to Amazon Data Firehose delivery stream subscriptions. Specify the ARN of the IAM role that has the following:\n\n- Permission to write to the Amazon Data Firehose delivery stream\n- Amazon SNS listed as a trusted entity\n\nSpecifying a valid ARN for this attribute is required for Firehose delivery stream subscriptions. For more information, see [Fanout to Amazon Data Firehose delivery streams](https://docs.aws.amazon.com/sns/latest/dg/sns-firehose-as-subscriber.html) in the *Amazon SNS Developer Guide.*", "title": "SubscriptionRoleArn", "type": "string" }, @@ -230646,6 +230678,8 @@ "title": "OutputLocation" }, "Parameters": { + "markdownDescription": "The parameters for the runtime configuration of the document.", + "title": "Parameters", "type": "object" }, "ScheduleExpression": { @@ -230659,7 +230693,7 @@ "type": "number" }, "SyncCompliance": { - "markdownDescription": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the PutComplianceItems API action. In this case, compliance data is not managed by State Manager. It is managed by your direct call to the PutComplianceItems API action.\n\nBy default, all associations use `AUTO` mode.", + "markdownDescription": "The mode for generating association compliance. You can specify `AUTO` or `MANUAL` . In `AUTO` mode, the system uses the status of the association execution to determine the compliance status. If the association execution runs successfully, then the association is `COMPLIANT` . If the association execution doesn't run successfully, the association is `NON-COMPLIANT` .\n\nIn `MANUAL` mode, you must specify the `AssociationId` as a parameter for the `PutComplianceItems` API action. In this case, compliance data is not managed by State Manager. 
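A hedged `AWS::RDS::DBInstance` sketch combining the `DBInstanceClass` and `CACertificateIdentifier` properties described above; the engine, instance class, storage size, and CA identifier are illustrative values, not recommendations, and `ManageMasterUserPassword` is used here only to avoid embedding a literal password:

```json
{
  "Database": {
    "Type": "AWS::RDS::DBInstance",
    "Properties": {
      "Engine": "mysql",
      "DBInstanceClass": "db.m5.large",
      "AllocatedStorage": "20",
      "CACertificateIdentifier": "rds-ca-rsa2048-g1",
      "MasterUsername": "admin",
      "ManageMasterUserPassword": true
    }
  }
}
```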
It is managed by your direct call to the `PutComplianceItems` API action.\n\nBy default, all associations use `AUTO` mode.", "title": "SyncCompliance", "type": "string" }, @@ -230667,7 +230701,7 @@ "items": { "$ref": "#/definitions/AWS::SSM::Association.Target" }, - "markdownDescription": "The targets for the association. You must specify the `InstanceId` or `Targets` property. You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` . To view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", + "markdownDescription": "The targets for the association. You must specify the `InstanceId` or `Targets` property. You can target all instances in an AWS account by specifying the `InstanceIds` key with a value of `*` .\n\nSupported formats include the following.\n\n- `Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>,<instance-id-3>`\n- `Key=tag-key,Values=<my-tag-key-1>,<my-tag-key-2>`\n\nTo view a JSON and a YAML example that targets all instances, see \"Create an association for all managed instances in an AWS account \" on the Examples page.", "title": "Targets", "type": "array" }, @@ -230807,7 +230841,7 @@ "type": "object" }, "DocumentFormat": { - "markdownDescription": "Specify the document format for the request. JSON is the default format.", + "markdownDescription": "Specify the document format for the request. `JSON` is the default format.", "title": "DocumentFormat", "type": "string" }, @@ -231000,7 +231034,7 @@ "type": "string" }, "StartDate": { - "markdownDescription": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. StartDate allows you to delay activation of the Maintenance Window until the specified future date.", + "markdownDescription": "The date and time, in ISO-8601 Extended format, for when the maintenance window is scheduled to become active. `StartDate` allows you to delay activation of the maintenance window until the specified future date.", "title": "StartDate", "type": "string" }, @@ -231351,7 +231385,7 @@ "type": "string" }, "Parameters": { - "markdownDescription": "The parameters for the AUTOMATION task.", + "markdownDescription": "The parameters for the `AUTOMATION` type task.", "title": "Parameters", "type": "object" } }, @@ -231568,7 +231602,7 @@ "additionalProperties": false, "properties": { "AllowedPattern": { - "markdownDescription": "A regular expression used to validate the parameter value. For example, for String types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", + "markdownDescription": "A regular expression used to validate the parameter value. For example, for `String` types with values restricted to numbers, you can specify the following: `AllowedPattern=^\\d+$`", "title": "AllowedPattern", "type": "string" }, @@ -231583,7 +231617,7 @@ "type": "string" }, "Name": { - "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter ARN, is 1011 characters.
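The `Targets` formats and `SyncCompliance` modes above compose as in this hedged sketch, which associates the AWS-managed `AWS-GatherSoftwareInventory` document with instances carrying a tag; the tag key, schedule, and logical ID are placeholders:

```json
{
  "InventoryAssociation": {
    "Type": "AWS::SSM::Association",
    "Properties": {
      "Name": "AWS-GatherSoftwareInventory",
      "Targets": [{ "Key": "tag:Environment", "Values": ["Production"] }],
      "ScheduleExpression": "rate(1 day)",
      "SyncCompliance": "AUTO"
    }
  }
}
```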
For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", + "markdownDescription": "The name of the parameter.\n\n> The maximum length constraint listed below includes capacity for additional system attributes that aren't part of the name. The maximum length for a parameter name, including the full length of the parameter Amazon Resource Name (ARN), is 1011 characters. For example, the length of the following parameter name is 65 characters, not 20 characters: `arn:aws:ssm:us-east-2:111222333444:parameter/ExampleParameterName`", "title": "Name", "type": "string" }, @@ -231609,7 +231643,7 @@ "type": "string" }, "Type": { - "markdownDescription": "The type of parameter.\n\n> Although `SecureString` is included in the list of valid values, AWS CloudFormation does *not* currently support creating a `SecureString` parameter type.", + "markdownDescription": "The type of parameter.", "title": "Type", "type": "string" }, @@ -231944,7 +231978,7 @@ "type": "string" }, "KMSKeyArn": { - "markdownDescription": "The ARN of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exist in the same region as the destination Amazon S3 bucket.", + "markdownDescription": "The Amazon Resource Name (ARN) of an encryption key for a destination in Amazon S3 . You can use a KMS key to encrypt inventory data in Amazon S3 . You must specify a key that exists in the same AWS Region as the destination Amazon S3 bucket.", "title": "KMSKeyArn", "type": "string" }, @@ -232130,7 +232164,7 @@ "type": "object" }, "ResourceArn": { - "markdownDescription": "Amazon Resource Name (ARN) of the resource to which you want to attach a policy.", + "markdownDescription": "The Amazon Resource Name (ARN) of the resource to which you want to attach a policy.", "title": "ResourceArn", "type": "string" } }, @@ -232216,7 +232250,7 @@ "type": "array" }, "Type": { - "markdownDescription": "Refers to the type of contact:\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule.", + "markdownDescription": "The type of contact.\n\n- `PERSONAL` : A single, individual contact.\n- `ESCALATION` : An escalation plan.\n- `ONCALL_SCHEDULE` : An on-call schedule.", "title": "Type", "type": "string" } }, @@ -232907,7 +232941,7 @@ "additionalProperties": false, "properties": { "SseKmsKeyId": { - "markdownDescription": "The KMS key ID to use to encrypt your replication set.", + "markdownDescription": "The AWS Key Management Service key ID to use to encrypt your replication set.", "title": "SseKmsKeyId", "type": "string" } }, @@ -233066,7 +233100,7 @@ "items": { "type": "string" }, - "markdownDescription": "The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel by using the SNS topics", + "markdownDescription": "The Amazon SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel by using the Amazon SNS topics", "title": "ChatbotSns", "type": "array" } }, @@ -233129,7 +233163,7 @@ "items": { "$ref": "#/definitions/AWS::SSMIncidents::ResponsePlan.NotificationTargetItem" }, - "markdownDescription": "The SNS targets that AWS Chatbot uses to notify the chat channel of updates to an incident.
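Tying the `AllowedPattern`, `Name`, and `Type` properties above together, here is a minimal hedged `AWS::SSM::Parameter` sketch; the parameter name and value are placeholders, and the pattern is the numeric example from the description:

```json
{
  "RetryLimit": {
    "Type": "AWS::SSM::Parameter",
    "Properties": {
      "Name": "/app/retry-limit",
      "Type": "String",
      "Value": "3",
      "AllowedPattern": "^\\d+$"
    }
  }
}
```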
You can also make updates to the incident through the chat channel using the SNS topics.", + "markdownDescription": "The Amazon Simple Notification Service ( Amazon SNS ) targets that AWS Chatbot uses to notify the chat channel of updates to an incident. You can also make updates to the incident through the chat channel using the Amazon SNS topics.", "title": "NotificationTargets", "type": "array" }, @@ -233168,7 +233202,7 @@ "additionalProperties": false, "properties": { "SnsTopicArn": { - "markdownDescription": "The Amazon Resource Name (ARN) of the SNS topic.", + "markdownDescription": "The Amazon Resource Name (ARN) of the Amazon SNS topic.", "title": "SnsTopicArn", "type": "string" } @@ -233224,7 +233258,7 @@ "type": "string" }, "DocumentVersion": { - "markdownDescription": "The automation document's version to use when running.", + "markdownDescription": "The version of the runbook to use when running.", "title": "DocumentVersion", "type": "string" }, @@ -233240,7 +233274,7 @@ "items": { "$ref": "#/definitions/AWS::SSMIncidents::ResponsePlan.SsmParameter" }, - "markdownDescription": "The key-value pair parameters to use when running the automation document.", + "markdownDescription": "The key-value pair parameters to use when running the runbook.", "title": "Parameters", "type": "array" }, @@ -233265,7 +233299,7 @@ "additionalProperties": false, "properties": { "Key": { - "markdownDescription": "The key parameter to use when running the automation document.", + "markdownDescription": "The key parameter to use when running the Automation runbook.", "title": "Key", "type": "string" }, @@ -233273,7 +233307,7 @@ "items": { "type": "string" }, - "markdownDescription": "The value parameter to use when running the automation document.", + "markdownDescription": "The value parameter to use when running the Automation runbook.", "title": "Values", "type": "array" } @@ -236359,7 +236393,7 @@ "additionalProperties": false, "properties": { "FeatureName": { - "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .", + "markdownDescription": "The name of a feature. The type must be a string. `FeatureName` cannot be any of the following: `is_deleted` , `write_time` , `api_invocation_time` .\n\nThe name:\n\n- Must start and end with an alphanumeric character.\n- Can only include alphanumeric characters, underscores, and hyphens. Spaces are not allowed.", "title": "FeatureName", "type": "string" }, @@ -239946,7 +239980,7 @@ "properties": { "Bias": { "$ref": "#/definitions/AWS::SageMaker::ModelPackage.Bias", - "markdownDescription": "Metrics that measure bais in a model.", + "markdownDescription": "Metrics that measure bias in a model.", "title": "Bias" }, "Explainability": { @@ -242915,7 +242949,7 @@ "additionalProperties": false, "properties": { "CognitoClientId": { - "markdownDescription": "An identifier for an application client. You must create the app client ID using Amazon Cognito.", + "markdownDescription": "An identifier for an application client. 
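A hedged sketch of how the runbook `DocumentVersion` and the key-value `Parameters` described above sit inside an `AWS::SSMIncidents::ResponsePlan`; the plan name, incident template, runbook name, role ARN, and instance ID are assumptions for illustration:

```json
{
  "Plan": {
    "Type": "AWS::SSMIncidents::ResponsePlan",
    "Properties": {
      "Name": "example-response-plan",
      "IncidentTemplate": { "Title": "Example incident", "Impact": 3 },
      "Actions": [
        {
          "SsmAutomation": {
            "DocumentName": "AWS-RestartEC2Instance",
            "DocumentVersion": "$DEFAULT",
            "RoleArn": "arn:aws:iam::111122223333:role/example-automation-role",
            "Parameters": [{ "Key": "InstanceId", "Values": ["i-0abcd1234example"] }]
          }
        }
      ]
    }
  }
}
```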
You must create the app client ID using Amazon Cognito .", "title": "CognitoClientId", "type": "string" }, @@ -255470,9 +255504,7 @@ "additionalProperties": false, "properties": { "JsonBody": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody", - "markdownDescription": "Redact the request body JSON.", - "title": "JsonBody" + "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.JsonBody" }, "Method": { "markdownDescription": "Redact the indicated HTTP method. The method indicates the type of operation that the request is asking the origin to perform.", @@ -255530,18 +255562,12 @@ "additionalProperties": false, "properties": { "InvalidFallbackBehavior": { - "markdownDescription": "What AWS WAF should do if it fails to completely parse the JSON body. The options are the following:\n\n- `EVALUATE_AS_STRING` - Inspect the body as plain text. AWS WAF applies the text transformations and inspection criteria that you defined for the JSON inspection to the body text string.\n- `MATCH` - Treat the web request as matching the rule statement. AWS WAF applies the rule action to the request.\n- `NO_MATCH` - Treat the web request as not matching the rule statement.\n\nIf you don't provide this setting, AWS WAF parses and evaluates the content only up to the first parsing failure that it encounters.\n\nAWS WAF does its best to parse the entire JSON body, but might be forced to stop for reasons such as invalid characters, duplicate keys, truncation, and any content whose root node isn't an object or an array.\n\nAWS WAF parses the JSON in the following examples as two valid key, value pairs:\n\n- Missing comma: `{\"key1\":\"value1\"\"key2\":\"value2\"}`\n- Missing colon: `{\"key1\":\"value1\",\"key2\"\"value2\"}`\n- Extra colons: `{\"key1\"::\"value1\",\"key2\"\"value2\"}`", - "title": "InvalidFallbackBehavior", "type": "string" }, "MatchPattern": { - "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern", - "markdownDescription": "The patterns to look for in the JSON body. AWS WAF inspects the results of these pattern matches against the rule inspection criteria.", - "title": "MatchPattern" + "$ref": "#/definitions/AWS::WAFv2::LoggingConfiguration.MatchPattern" }, "MatchScope": { - "markdownDescription": "The parts of the JSON to match against using the `MatchPattern` . If you specify `ALL` , AWS WAF matches against keys and values.\n\n`All` does not require a match to be found in the keys and a match to be found in the values. It requires a match to be found in the keys or the values or both. To require a match in the keys and in the values, use a logical `AND` statement to combine two match rules, one that inspects the keys and another that inspects the values.", - "title": "MatchScope", "type": "string" } }, @@ -255592,16 +255618,12 @@ "additionalProperties": false, "properties": { "All": { - "markdownDescription": "Match all of the elements.\n\nYou must specify either this setting or the `IncludedPaths` setting, but not both.", - "title": "All", "type": "object" }, "IncludedPaths": { "items": { "type": "string" }, - "markdownDescription": "Match only the specified include paths.\n\nProvide the include paths using JSON Pointer syntax. For example, `\"IncludedPaths\": [\"/dogs/0/name\", \"/dogs/1/name\"]` . 
For information about this syntax, see the Internet Engineering Task Force (IETF) documentation [JavaScript Object Notation (JSON) Pointer](https://docs.aws.amazon.com/https://tools.ietf.org/html/rfc6901) .\n\nYou must specify either this setting or the `All` setting, but not both.\n\n> Don't use this option to include all paths. Instead, use the `All` setting.", - "title": "IncludedPaths", "type": "array" } }, @@ -256941,7 +256963,7 @@ }, "RateBasedStatement": { "$ref": "#/definitions/AWS::WAFv2::RuleGroup.RateBasedStatement", - "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . 
You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. 
The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "title": "RateBasedStatement" }, "RegexMatchStatement": { @@ -258806,7 +258828,7 @@ }, "RateBasedStatement": { "$ref": "#/definitions/AWS::WAFv2::WebACL.RateBasedStatement", - "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\nYou can specify individual aggregation keys, like IP address or HTTP method. 
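As a minimal hedged example of the rate-based statement described above, aggregating on the client IP only: a web ACL rule that blocks an address once it exceeds an illustrative limit of 1,000 requests in the evaluation window. The rule name, priority, and metric name are placeholders, and custom aggregation keys and a scope-down statement are omitted:

```json
{
  "Name": "limit-requests-per-ip",
  "Priority": 0,
  "Action": { "Block": {} },
  "VisibilityConfig": {
    "SampledRequestsEnabled": true,
    "CloudWatchMetricsEnabled": true,
    "MetricName": "limit-requests-per-ip"
  },
  "Statement": {
    "RateBasedStatement": { "Limit": 1000, "AggregateKeyType": "IP" }
  }
}
```

Because this configuration aggregates on the individual IP address only, the addresses currently being rate limited can be retrieved through `GetRateBasedStatementManagedKeys`, as noted in the description.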
You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . 
If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", + "markdownDescription": "A rate-based rule counts incoming requests and rate limits requests when they are coming at too fast a rate. The rule categorizes requests according to your aggregation criteria, collects them into aggregation instances, and counts and rate limits the requests for each instance.\n\n> If you change any of these settings in a rule that's currently in use, the change resets the rule's rate limiting counts. This can pause the rule's rate limiting activities for up to a minute. \n\nYou can specify individual aggregation keys, like IP address or HTTP method. You can also specify aggregation key combinations, like IP address and HTTP method, or HTTP method, query argument, and cookie.\n\nEach unique set of values for the aggregation keys that you specify is a separate aggregation instance, with the value from each key contributing to the aggregation instance definition.\n\nFor example, assume the rule evaluates web requests with the following IP address and HTTP method values:\n\n- IP address 10.1.1.1, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n- IP address 127.0.0.0, HTTP method POST\n- IP address 10.1.1.1, HTTP method GET\n\nThe rule would create different aggregation instances according to your aggregation criteria, for example:\n\n- If the aggregation criteria is just the IP address, then each individual address is an aggregation instance, and AWS WAF counts requests separately for each. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1: count 3\n- IP address 127.0.0.0: count 1\n- If the aggregation criteria is HTTP method, then each individual HTTP method is an aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- HTTP method POST: count 2\n- HTTP method GET: count 2\n- If the aggregation criteria is IP address and HTTP method, then each IP address and each HTTP method would contribute to the combined aggregation instance. The aggregation instances and request counts for our example would be the following:\n\n- IP address 10.1.1.1, HTTP method POST: count 1\n- IP address 10.1.1.1, HTTP method GET: count 2\n- IP address 127.0.0.0, HTTP method POST: count 1\n\nFor any n-tuple of aggregation keys, each unique combination of values for the keys defines a separate aggregation instance, which AWS WAF counts and rate-limits individually.\n\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts and rate limits requests that match the nested statement. You can use this nested scope-down statement in conjunction with your aggregation key specifications or you can just count and rate limit all requests that match the scope-down statement, without additional aggregation. When you choose to just manage all requests that match a scope-down statement, the aggregation instance is singular for the rule.\n\nYou cannot nest a `RateBasedStatement` inside another statement, for example inside a `NotStatement` or `OrStatement` . 
You can define a `RateBasedStatement` inside a web ACL and inside a rule group.\n\nFor additional information about the options, see [Rate limiting web requests using rate-based rules](https://docs.aws.amazon.com/waf/latest/developerguide/waf-rate-based-rules.html) in the *AWS WAF Developer Guide* .\n\nIf you only aggregate on the individual IP address or forwarded IP address, you can retrieve the list of IP addresses that AWS WAF is currently rate limiting for a rule through the API call `GetRateBasedStatementManagedKeys` . This option is not available for other aggregation configurations.\n\nAWS WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by AWS WAF . If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by AWS WAF .", "title": "RateBasedStatement" }, "RegexMatchStatement": { diff --git a/tests/unit/test_region_configuration.py b/tests/unit/test_region_configuration.py index fd00b608d..e9bcf50b3 100644 --- a/tests/unit/test_region_configuration.py +++ b/tests/unit/test_region_configuration.py @@ -26,6 +26,7 @@ def test_when_apigw_edge_configuration_supported(self, partition): ["aws-us-gov"], ["aws-iso"], ["aws-iso-b"], + ["aws-iso-e"], ] ) def test_when_apigw_edge_configuration_is_not_supported(self, partition): diff --git a/tests/unit/translator/test_arn_generator.py b/tests/unit/translator/test_arn_generator.py index b91bb3143..f31cb7f26 100644 --- a/tests/unit/translator/test_arn_generator.py +++ b/tests/unit/translator/test_arn_generator.py @@ -14,6 +14,7 @@ class TestArnGenerator(TestCase): ["us-gov-west-1", "aws-us-gov"], ["us-iso-east-1", "aws-iso"], ["us-isob-east-1", "aws-iso-b"], + ["eu-isoe-west-1", "aws-iso-e"], ] ) def test_get_partition_name(self, region, expected_partition): @@ -27,6 +28,7 @@ def test_get_partition_name(self, region, expected_partition): ["us-gov-west-1", "aws-us-gov"], ["us-iso-east-1", "aws-iso"], ["us-isob-east-1", "aws-iso-b"], + ["eu-isoe-west-1", "aws-iso-e"], ] ) def test_get_partition_name_when_region_not_provided(self, region, expected_partition):